parallel_for.h | /*!
* Copyright (c) 2021 by Contributors
 * \file runtime/parallel_for.h
 * \brief Utilities for parallel for loops and reductions.
*/
#ifndef DGL_RUNTIME_PARALLEL_FOR_H_
#define DGL_RUNTIME_PARALLEL_FOR_H_
#include <dmlc/omp.h>
#include <algorithm>
#include <string>
#include <cstdlib>
#include <exception>
#include <vector>
#include <atomic>
namespace {
int64_t divup(int64_t x, int64_t y) {
return (x + y - 1) / y;
}
}
namespace dgl {
namespace runtime {
namespace {
size_t compute_num_threads(size_t begin, size_t end, size_t grain_size) {
if (omp_in_parallel() || end - begin <= grain_size || end - begin == 1)
return 1;
return std::min(static_cast<int64_t>(omp_get_max_threads()), divup(end - begin, grain_size));
}
struct DefaultGrainSizeT {
size_t grain_size;
DefaultGrainSizeT() {
auto var = std::getenv("DGL_PARALLEL_FOR_GRAIN_SIZE");
if (!var) {
grain_size = 1;
} else {
grain_size = std::stoul(var);
}
}
size_t operator()() {
return grain_size;
}
};
} // namespace
static DefaultGrainSizeT default_grain_size;
/*!
* \brief OpenMP-based parallel for loop.
*
* It requires each thread's workload to have at least \a grain_size elements.
 * The loop body is a function that takes two arguments \a begin and \a end,
 * which stand for the starting (inclusive) and ending (exclusive) indices of
 * the workload.
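 *
 * Example to increment every element of a vector \c a in parallel:
 *
 *   parallel_for(0, a.size(), 1, [&a] (size_t b, size_t e) {
 *     for (auto i = b; i < e; ++i)
 *       a[i] += 1;
 *   });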
*/
template <typename F>
void parallel_for(
const size_t begin,
const size_t end,
const size_t grain_size,
F&& f) {
if (begin >= end) {
return;
}
#ifdef _OPENMP
auto num_threads = compute_num_threads(begin, end, grain_size);
// (BarclayII) the exception code is borrowed from PyTorch.
std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
std::exception_ptr eptr;
#pragma omp parallel num_threads(num_threads)
{
auto tid = omp_get_thread_num();
auto chunk_size = divup((end - begin), num_threads);
auto begin_tid = begin + tid * chunk_size;
if (begin_tid < end) {
auto end_tid = std::min(end, chunk_size + begin_tid);
try {
f(begin_tid, end_tid);
} catch (...) {
if (!err_flag.test_and_set())
eptr = std::current_exception();
}
}
}
if (eptr)
std::rethrow_exception(eptr);
#else
f(begin, end);
#endif
}
/*!
* \brief OpenMP-based parallel for loop with default grain size.
*
 * Same as parallel_for above, with the grain size set to a default value:
 * 1 unless overridden through the environment variable
 * DGL_PARALLEL_FOR_GRAIN_SIZE.
 * If the grain size is 1, the function behaves the same way as an OpenMP
 * parallel-for pragma with static scheduling.
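 *
 * For example, running with DGL_PARALLEL_FOR_GRAIN_SIZE=4096 caps the number
 * of threads at roughly (end - begin) / 4096, since each thread must receive
 * at least 4096 elements.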
*/
template <typename F>
void parallel_for(
const size_t begin,
const size_t end,
F&& f) {
parallel_for(begin, end, default_grain_size(), std::forward<F>(f));
}
/*!
* \brief OpenMP-based two-stage parallel reduction.
*
* The first-stage reduction function \a f works in parallel. Each thread's workload has
* at least \a grain_size elements. The loop body will be a function that takes in
* the starting index (inclusive), the ending index (exclusive), and the reduction identity.
*
* The second-stage reduction function \a sf is a binary function working in the main
* thread. It aggregates the partially reduced result computed from each thread.
*
* Example to compute a parallelized max reduction of an array \c a:
*
* parallel_reduce(
* 0, // starting index
* 100, // ending index
* 1, // grain size
 *     -std::numeric_limits<float>::infinity(),  // identity
* [&a] (int begin, int end, float ident) { // first-stage partial reducer
* float result = ident;
* for (int i = begin; i < end; ++i)
* result = std::max(result, a[i]);
* return result;
* },
 *     [] (float result, float partial_result) {  // second-stage reducer
* return std::max(result, partial_result);
* });
*/
template <typename DType, typename F, typename SF>
DType parallel_reduce(
const size_t begin,
const size_t end,
const size_t grain_size,
const DType ident,
const F& f,
const SF& sf) {
if (begin >= end) {
return ident;
}
int num_threads = compute_num_threads(begin, end, grain_size);
if (num_threads == 1) {
return f(begin, end, ident);
}
std::vector<DType> results(num_threads, ident);
std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
std::exception_ptr eptr;
#pragma omp parallel num_threads(num_threads)
{
auto tid = omp_get_thread_num();
auto chunk_size = divup((end - begin), num_threads);
auto begin_tid = begin + tid * chunk_size;
if (begin_tid < end) {
auto end_tid = std::min(end, static_cast<size_t>(chunk_size + begin_tid));
try {
results[tid] = f(begin_tid, end_tid, ident);
} catch (...) {
if (!err_flag.test_and_set())
eptr = std::current_exception();
}
}
}
if (eptr)
std::rethrow_exception(eptr);
DType out = ident;
for (int64_t i = 0; i < num_threads; ++i)
out = sf(out, results[i]);
return out;
}
} // namespace runtime
} // namespace dgl
#endif // DGL_RUNTIME_PARALLEL_FOR_H_
|
GB_unop__isfinite_bool_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isfinite_bool_fp32)
// op(A') function: GB (_unop_tran__isfinite_bool_fp32)
// C type: bool
// A type: float
// cast: float cij = (aij)
// unaryop: cij = isfinite (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = isfinite (x) ;
// casting
#define GB_CAST(z, aij) \
float z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (aij) ; \
Cx [pC] = isfinite (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__isfinite_bool_fp32)
(
bool *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = (aij) ;
Cx [p] = isfinite (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = (aij) ;
Cx [p] = isfinite (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
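//------------------------------------------------------------------------------
// usage sketch: this kernel is reached through GrB_apply. Assuming
// GxB_ISFINITE_FP32 is the matching built-in unary op, a caller would do:
//
//      GrB_Matrix C, A ;                       // A has type GrB_FP32
//      GrB_Matrix_new (&C, GrB_BOOL, nrows, ncols) ;
//      GrB_Matrix_apply (C, NULL, NULL, GxB_ISFINITE_FP32, A, NULL) ;
//------------------------------------------------------------------------------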
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__isfinite_bool_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
spacetime_heat_initial_m0_kernel_antiderivative.h | /*
Copyright (c) 2020, VSB - Technical University of Ostrava and Graz University of
Technology
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the names of VSB - Technical University of Ostrava and Graz
University of Technology nor the names of its contributors may be used to
endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL VSB - TECHNICAL UNIVERSITY OF OSTRAVA AND
GRAZ UNIVERSITY OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** @file spacetime_heat_initial_m0_kernel_antiderivative.h
 * @brief Antiderivatives of the initial spacetime heat kernel (M0 operator).
*/
#ifndef INCLUDE_BESTHEA_SPACETIME_HEAT_INITIAL_M0_KERNEL_ANTIDERIVATIVE_H_
#define INCLUDE_BESTHEA_SPACETIME_HEAT_INITIAL_M0_KERNEL_ANTIDERIVATIVE_H_
#include <besthea/spacetime_heat_initial_kernel_antiderivative.h>
#include "besthea/settings.h"
#include <vector>
namespace besthea {
namespace bem {
class spacetime_heat_initial_m0_kernel_antiderivative;
}
}
/**
 * Class representing antiderivatives of the initial spacetime heat kernel
 * (M0 operator).
*/
class besthea::bem::spacetime_heat_initial_m0_kernel_antiderivative
: public besthea::bem::spacetime_heat_initial_kernel_antiderivative<
spacetime_heat_initial_m0_kernel_antiderivative > {
public:
/**
* Constructor.
* @param[in] alpha Heat conductivity.
*/
spacetime_heat_initial_m0_kernel_antiderivative( sc alpha )
: spacetime_heat_initial_kernel_antiderivative<
spacetime_heat_initial_m0_kernel_antiderivative >( alpha ) {
}
/**
* Destructor.
*/
virtual ~spacetime_heat_initial_m0_kernel_antiderivative( ) {
}
  /**
   * Evaluates the first antiderivative in time of the kernel for `t > 0`.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable (unused for this kernel).
   * @param[in] t Time variable `t`.
   */
#pragma omp declare simd uniform( this, nx, t ) simdlen( DATA_WIDTH )
sc do_anti_t_regular(
sc xy1, sc xy2, sc xy3, [[maybe_unused]] const sc * nx, sc t ) const {
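    // First antiderivative in t of the heat kernel
    // G( x-y, t ) = ( 4 pi alpha t )^(-3/2) exp( -|x-y|^2 / ( 4 alpha t ) ),
    // which evaluates to
    // -erf( |x-y| / ( 2 sqrt( alpha t ) ) ) / ( 4 pi alpha |x-y| ).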
sc norm = std::sqrt( xy1 * xy1 + xy2 * xy2 + xy3 * xy3 );
sc sqrt_d = std::sqrt( t );
sc value = -std::erf( norm / ( _two * _sqrt_alpha * sqrt_d ) )
/ ( _four * _pi * _alpha * norm );
return value;
}
  /**
   * Evaluates the limit of the first antiderivative in time as `t` tends to
   * zero.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable (unused for this kernel).
   */
#pragma omp declare simd uniform( this, nx ) simdlen( DATA_WIDTH )
sc do_anti_t_limit(
sc xy1, sc xy2, sc xy3, [[maybe_unused]] const sc * nx ) const {
sc norm = std::sqrt( xy1 * xy1 + xy2 * xy2 + xy3 * xy3 );
sc value = -_one / ( _four * _pi * _alpha * norm );
return value;
}
};
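// Minimal usage sketch (hypothetical values; `sc` is besthea's scalar type):
//
//   besthea::bem::spacetime_heat_initial_m0_kernel_antiderivative kernel( 0.5 );
//   sc nx[ 3 ] = { 0.0, 0.0, 1.0 };
//   sc value = kernel.do_anti_t_regular( 0.1, 0.2, 0.3, nx, 1.0 );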
#endif /* INCLUDE_BESTHEA_SPACETIME_HEAT_INITIAL_M0_KERNEL_ANTIDERIVATIVE_H_ \
*/
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
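/*
 * Untiled reference form of the update performed by the tiled loops below,
 * for each interior point (i,j,k) at time step t:
 *
 *   A[(t+1)%2][i][j][k] =
 *       coef[0][i][j][k] * A[t%2][i  ][j  ][k  ]
 *     + coef[1][i][j][k] * A[t%2][i-1][j  ][k  ]
 *     + coef[2][i][j][k] * A[t%2][i  ][j-1][k  ]
 *     + coef[3][i][j][k] * A[t%2][i  ][j  ][k-1]
 *     + coef[4][i][j][k] * A[t%2][i+1][j  ][k  ]
 *     + coef[5][i][j][k] * A[t%2][i  ][j+1][k  ]
 *     + coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
 */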
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 24;
tile_size[3] = 1024;
tile_size[4] = -1;
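  // These values (4, 4, 24, 1024) reappear as the blocking factors of the
  // time/z/y/x loops in the generated CLooG nest below.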
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;  // second time buffer, including boundary layers
      }
    }
  }
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-11,12)),ceild(4*t2-Nz-20,24));t3<=min(min(min(floord(4*t2+Ny,24),floord(Nt+Ny-4,24)),floord(2*t1+Ny+1,24)),floord(4*t1-4*t2+Nz+Ny-1,24));t3++) {
for (t4=max(max(max(0,ceild(t1-511,512)),ceild(4*t2-Nz-1020,1024)),ceild(24*t3-Ny-1020,1024));t4<=min(min(min(min(floord(4*t2+Nx,1024),floord(Nt+Nx-4,1024)),floord(2*t1+Nx+1,1024)),floord(24*t3+Nx+20,1024)),floord(4*t1-4*t2+Nz+Nx-1,1024));t4++) {
for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),24*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),24*t3+22),1024*t4+1022),4*t1-4*t2+Nz+1);t5++) {
for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
lbv=max(1024*t4,t5+1);
ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
                    A[(t5+1)%2][-t5+t6][-t5+t7][-t5+t8] =
                        coef[0][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6][-t5+t7][-t5+t8]
                      + coef[1][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6-1][-t5+t7][-t5+t8]
                      + coef[2][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6][-t5+t7-1][-t5+t8]
                      + coef[3][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6][-t5+t7][-t5+t8-1]
                      + coef[4][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6+1][-t5+t7][-t5+t8]
                      + coef[5][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6][-t5+t7+1][-t5+t8]
                      + coef[6][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6][-t5+t7][-t5+t8+1];
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 2053
#define MaxBezierCoordinates 67108864
#define ThrowPointExpectedException(image,token) \
{ \
(void) ThrowMagickException(&(image)->exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;
double
scanline;
PointInfo
*points;
size_t
number_points;
ssize_t
direction;
MagickBooleanType
ghostline;
size_t
highwater;
} EdgeInfo;
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info;
size_t
*extent;
ssize_t
offset;
PointInfo
point;
ExceptionInfo
*exception;
} MVGInfo;
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
typedef enum
{
MoveToCode,
OpenCode,
GhostlineCode,
LineToCode,
EndCode
} PathInfoCode;
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *);
static ssize_t
TracePath(Image *,MVGInfo *,const char *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
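%  For example, pairing AcquireDrawInfo() with DestroyDrawInfo():
%
%      DrawInfo *draw_info = AcquireDrawInfo();
%      ...
%      draw_info = DestroyDrawInfo(draw_info);
%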
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
DrawInfo
*draw_info;
draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
GetDrawInfo((ImageInfo *) NULL,draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
const DrawInfo *draw_info)
{
DrawInfo
*clone_info;
clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetDrawInfo(image_info,clone_info);
if (draw_info == (DrawInfo *) NULL)
return(clone_info);
if (draw_info->id != (char *) NULL)
(void) CloneString(&clone_info->id,draw_info->id);
if (draw_info->primitive != (char *) NULL)
(void) CloneString(&clone_info->primitive,draw_info->primitive);
if (draw_info->geometry != (char *) NULL)
(void) CloneString(&clone_info->geometry,draw_info->geometry);
clone_info->compliance=draw_info->compliance;
clone_info->viewbox=draw_info->viewbox;
clone_info->affine=draw_info->affine;
clone_info->gravity=draw_info->gravity;
clone_info->fill=draw_info->fill;
clone_info->stroke=draw_info->stroke;
clone_info->stroke_width=draw_info->stroke_width;
if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
&draw_info->fill_pattern->exception);
else
if (draw_info->tile != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->tile,0,0,MagickTrue,
&draw_info->tile->exception);
clone_info->tile=NewImageList(); /* tile is deprecated */
if (draw_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
MagickTrue,&draw_info->stroke_pattern->exception);
clone_info->stroke_antialias=draw_info->stroke_antialias;
clone_info->text_antialias=draw_info->text_antialias;
clone_info->fill_rule=draw_info->fill_rule;
clone_info->linecap=draw_info->linecap;
clone_info->linejoin=draw_info->linejoin;
clone_info->miterlimit=draw_info->miterlimit;
clone_info->dash_offset=draw_info->dash_offset;
clone_info->decorate=draw_info->decorate;
clone_info->compose=draw_info->compose;
if (draw_info->text != (char *) NULL)
(void) CloneString(&clone_info->text,draw_info->text);
if (draw_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,draw_info->font);
if (draw_info->metrics != (char *) NULL)
(void) CloneString(&clone_info->metrics,draw_info->metrics);
if (draw_info->family != (char *) NULL)
(void) CloneString(&clone_info->family,draw_info->family);
clone_info->style=draw_info->style;
clone_info->stretch=draw_info->stretch;
clone_info->weight=draw_info->weight;
if (draw_info->encoding != (char *) NULL)
(void) CloneString(&clone_info->encoding,draw_info->encoding);
clone_info->pointsize=draw_info->pointsize;
clone_info->kerning=draw_info->kerning;
clone_info->interline_spacing=draw_info->interline_spacing;
clone_info->interword_spacing=draw_info->interword_spacing;
clone_info->direction=draw_info->direction;
if (draw_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,draw_info->density);
clone_info->align=draw_info->align;
clone_info->undercolor=draw_info->undercolor;
clone_info->border_color=draw_info->border_color;
if (draw_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
{
register ssize_t
x;
for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*clone_info->dash_pattern));
if (clone_info->dash_pattern == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
sizeof(*clone_info->dash_pattern));
(void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
(x+1)*sizeof(*clone_info->dash_pattern));
}
clone_info->gradient=draw_info->gradient;
if (draw_info->gradient.stops != (StopInfo *) NULL)
{
size_t
number_stops;
number_stops=clone_info->gradient.number_stops;
clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops));
if (clone_info->gradient.stops == (StopInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
(size_t) number_stops*sizeof(*clone_info->gradient.stops));
}
clone_info->bounds=draw_info->bounds;
clone_info->fill_opacity=draw_info->fill_opacity;
clone_info->stroke_opacity=draw_info->stroke_opacity;
clone_info->element_reference=draw_info->element_reference;
clone_info->clip_path=draw_info->clip_path;
clone_info->clip_units=draw_info->clip_units;
if (draw_info->clip_mask != (char *) NULL)
(void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
MagickTrue,&draw_info->clipping_mask->exception);
if (draw_info->composite_mask != (Image *) NULL)
clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
MagickTrue,&draw_info->composite_mask->exception);
clone_info->render=draw_info->render;
clone_info->debug=IsEventLogging();
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
%    o Method ConvertPathToPolygon returns the path in a more efficient sorted
%      rendering form of type PolygonInfo.
%
%    o path_info: Specifies a pointer to a PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
if (((p)-(q)) < 0.0) \
return(-1); \
if (((p)-(q)) > 0.0) \
return(1); \
}
register const PointInfo
*p,
*q;
/*
Edge sorting for right-handed coordinate system.
*/
p=((const EdgeInfo *) p_edge)->points;
q=((const EdgeInfo *) q_edge)->points;
DrawCompareEdge(p[0].y,q[0].y);
DrawCompareEdge(p[0].x,q[0].x);
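  /*
    Compare slopes without division: dx_p*dy_q vs. dy_p*dx_q.
  */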
DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
(q[1].x-q[0].x));
DrawCompareEdge(p[1].y,q[1].y);
DrawCompareEdge(p[1].x,q[1].x);
return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
PointInfo
point;
register ssize_t
i;
for (i=0; i < (ssize_t) (number_points >> 1); i++)
{
point=points[i];
points[i]=points[number_points-(i+1)];
points[number_points-(i+1)]=point;
}
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
return((PolygonInfo *) NULL);
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=0.0;
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) direction;
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->number_edges=0;
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
ghostline=MagickFalse;
edge++;
}
}
polygon_info->number_edges=edge;
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),DrawCompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
register const PathInfo
*p;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
for (p=path_info; p->code != EndCode; p++)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
"moveto ghostline" : p->code == OpenCode ? "moveto open" :
p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
"?");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(
const DrawInfo *magick_unused(draw_info),const PrimitiveInfo *primitive_info)
{
MagickBooleanType
closed_subpath;
PathInfo
*path_info;
PathInfoCode
code;
PointInfo
p,
q;
register ssize_t
i,
n;
ssize_t
coordinates,
start;
magick_unreferenced(draw_info);
/*
Converts a PrimitiveInfo structure into a vector path structure.
*/
switch (primitive_info->primitive)
{
case PointPrimitive:
case ColorPrimitive:
case MattePrimitive:
case TextPrimitive:
case ImagePrimitive:
return((PathInfo *) NULL);
default:
break;
}
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
sizeof(*path_info));
if (path_info == (PathInfo *) NULL)
return((PathInfo *) NULL);
coordinates=0;
closed_subpath=MagickFalse;
n=0;
p.x=(-1.0);
p.y=(-1.0);
q.x=(-1.0);
q.y=(-1.0);
start=0;
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
code=LineToCode;
if (coordinates <= 0)
{
/*
New subpath.
*/
coordinates=(ssize_t) primitive_info[i].coordinates;
p=primitive_info[i].point;
start=n;
code=MoveToCode;
closed_subpath=primitive_info[i].closed_subpath;
}
coordinates--;
if ((code == MoveToCode) || (coordinates <= 0) ||
(fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
(fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
{
/*
Eliminate duplicate points.
*/
path_info[n].code=code;
path_info[n].point=primitive_info[i].point;
q=primitive_info[i].point;
n++;
}
if (coordinates > 0)
continue; /* next point in current subpath */
if (closed_subpath != MagickFalse)
{
closed_subpath=MagickFalse;
continue;
}
    /*
      Subpath is not closed: mark its starting point as open and add a
      ghostline from the last point back to the starting point p.
    */
path_info[start].code=OpenCode;
path_info[n].code=GhostlineCode;
path_info[n].point=primitive_info[i].point;
n++;
path_info[n].code=LineToCode;
path_info[n].point=p;
n++;
}
path_info[n].code=EndCode;
path_info[n].point.x=0.0;
path_info[n].point.y=0.0;
if (IsEventLogging() != MagickFalse)
LogPathInfo(path_info);
path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
sizeof(*path_info));
return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
assert(draw_info != (DrawInfo *) NULL);
if (draw_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info->signature == MagickCoreSignature);
if (draw_info->id != (char *) NULL)
draw_info->id=DestroyString(draw_info->id);
if (draw_info->primitive != (char *) NULL)
draw_info->primitive=DestroyString(draw_info->primitive);
if (draw_info->text != (char *) NULL)
draw_info->text=DestroyString(draw_info->text);
if (draw_info->geometry != (char *) NULL)
draw_info->geometry=DestroyString(draw_info->geometry);
if (draw_info->tile != (Image *) NULL)
draw_info->tile=DestroyImage(draw_info->tile);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
if (draw_info->metrics != (char *) NULL)
draw_info->metrics=DestroyString(draw_info->metrics);
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
if (draw_info->encoding != (char *) NULL)
draw_info->encoding=DestroyString(draw_info->encoding);
if (draw_info->density != (char *) NULL)
draw_info->density=DestroyString(draw_info->density);
if (draw_info->server_name != (char *) NULL)
draw_info->server_name=(char *)
RelinquishMagickMemory(draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
draw_info->dash_pattern=(double *) RelinquishMagickMemory(
draw_info->dash_pattern);
if (draw_info->gradient.stops != (StopInfo *) NULL)
draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
draw_info->gradient.stops);
if (draw_info->clip_mask != (char *) NULL)
draw_info->clip_mask=DestroyString(draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
if (draw_info->composite_mask != (Image *) NULL)
draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
draw_info->signature=(~MagickCoreSignature);
draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
%      size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
% A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
const size_t edge)
{
assert(edge < polygon_info->number_edges);
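  /*
    Release the edge's point list, then compact the edges array over the
    destroyed entry.
  */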
polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
polygon_info->edges[edge].points);
polygon_info->number_edges--;
if (edge < polygon_info->number_edges)
(void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
(size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
register ssize_t
i;
if (polygon_info->edges != (EdgeInfo *) NULL)
{
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
polygon_info->edges[i].points=(PointInfo *)
RelinquishMagickMemory(polygon_info->edges[i].points);
polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
polygon_info->edges);
}
return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
const double y,const SegmentInfo *edge)
{
double
intercept,
z;
register double
x;
SegmentInfo
inverse_edge;
/*
Determine left and right edges.
*/
inverse_edge.x1=edge->x1;
inverse_edge.y1=edge->y1;
inverse_edge.x2=edge->x2;
inverse_edge.y2=edge->y2;
z=affine->ry*y+affine->tx;
if (affine->sx >= MagickEpsilon)
{
intercept=(-z/affine->sx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->sx < -MagickEpsilon)
{
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->sx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
{
inverse_edge.x2=edge->x1;
return(inverse_edge);
}
/*
Determine top and bottom edges.
*/
z=affine->sy*y+affine->ty;
if (affine->rx >= MagickEpsilon)
{
intercept=(-z/affine->rx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->rx < -MagickEpsilon)
{
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->rx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
{
inverse_edge.x2=edge->x2;
return(inverse_edge);
}
return(inverse_edge);
}
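/*
  InverseAffineMatrix() inverts the affine transform

    x' = sx*x + ry*y + tx,   y' = rx*x + sy*y + ty

  in closed form: the linear part of the inverse is the 2x2 adjugate divided
  by the determinant sx*sy-rx*ry, and the inverse translation is minus the
  inverse linear part applied to (tx,ty).
*/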
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
double
determinant;
determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
affine->ry);
inverse_affine.sx=determinant*affine->sy;
inverse_affine.rx=determinant*(-affine->rx);
inverse_affine.ry=determinant*(-affine->ry);
inverse_affine.sy=determinant*affine->sx;
inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
inverse_affine.ry;
inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
inverse_affine.sy;
return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
const Image *source,const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
CacheView
*image_view,
*source_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickPixelPacket
zero;
PointInfo
extent[4],
min,
max,
point;
register ssize_t
i;
SegmentInfo
edge;
ssize_t
start,
stop,
y;
/*
Determine bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(source != (const Image *) NULL);
assert(source->signature == MagickCoreSignature);
assert(affine != (AffineMatrix *) NULL);
extent[0].x=0.0;
extent[0].y=0.0;
extent[1].x=(double) source->columns-1.0;
extent[1].y=0.0;
extent[2].x=(double) source->columns-1.0;
extent[2].y=(double) source->rows-1.0;
extent[3].x=0.0;
extent[3].y=(double) source->rows-1.0;
for (i=0; i < 4; i++)
{
point=extent[i];
extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
/*
Affine transform image.
*/
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
edge.x1=MagickMax(min.x,0.0);
edge.y1=MagickMax(min.y,0.0);
edge.x2=MagickMin(max.x,(double) image->columns-1.0);
edge.y2=MagickMin(max.y,(double) image->rows-1.0);
inverse_affine=InverseAffineMatrix(affine);
GetMagickPixelPacket(image,&zero);
exception=(&image->exception);
start=(ssize_t) ceil(edge.y1-0.5);
stop=(ssize_t) floor(edge.y2+0.5);
source_view=AcquireVirtualCacheView(source,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,image,stop-start,1)
#endif
for (y=start; y <= stop; y++)
{
MagickPixelPacket
composite,
pixel;
PointInfo
point;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
SegmentInfo
inverse_edge;
ssize_t
x_offset;
inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
if (inverse_edge.x2 < inverse_edge.x1)
continue;
q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
1,exception);
if (q == (PixelPacket *) NULL)
continue;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
composite=zero;
x_offset=0;
for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
{
point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
inverse_affine.tx;
point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
inverse_affine.ty;
status=InterpolateMagickPixelPacket(source,source_view,
UndefinedInterpolatePixel,point.x,point.y,&pixel,exception);
if (status == MagickFalse)
break;
SetMagickPixelPacket(image,q,indexes+x_offset,&composite);
MagickPixelCompositeOver(&pixel,pixel.opacity,&composite,
composite.opacity,&composite);
SetPixelPacket(image,&composite,q,indexes+x_offset);
x_offset++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
const DrawInfo *draw_info,const PolygonInfo *polygon_info)
{
double
mid;
DrawInfo
*clone_info;
MagickStatusType
status;
PointInfo
end,
resolution,
start;
PrimitiveInfo
primitive_info[6];
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
coordinates;
(void) memset(primitive_info,0,sizeof(primitive_info));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
status=QueryColorDatabase("#0000",&clone_info->fill,&image->exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
resolution.x=96.0;
resolution.y=96.0;
if (clone_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(clone_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == MagickFalse)
resolution.y=resolution.x;
}
mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
clone_info->stroke_width/2.0;
bounds.x1=0.0;
bounds.y1=0.0;
bounds.x2=0.0;
bounds.y2=0.0;
if (polygon_info != (PolygonInfo *) NULL)
{
bounds=polygon_info->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
bounds.x1=polygon_info->edges[i].bounds.x1;
if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
bounds.y1=polygon_info->edges[i].bounds.y1;
if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
bounds.x2=polygon_info->edges[i].bounds.x2;
if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
bounds.y2=polygon_info->edges[i].bounds.y2;
}
bounds.x1-=mid;
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=mid;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=mid;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=mid;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
image->rows ? (double) image->rows-1 : bounds.y2;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].direction != 0)
status=QueryColorDatabase("#f00",&clone_info->stroke,
&image->exception);
else
status=QueryColorDatabase("#0f0",&clone_info->stroke,
&image->exception);
if (status == MagickFalse)
break;
start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info);
if (status == MagickFalse)
break;
}
if (i < (ssize_t) polygon_info->number_edges)
{
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
}
status=QueryColorDatabase("#00f",&clone_info->stroke,&image->exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
start.x=(double) (bounds.x1-mid);
start.y=(double) (bounds.y1-mid);
end.x=(double) (bounds.x2+mid);
end.y=(double) (bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info);
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the named clip path onto the image clipping mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
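% A minimal usage sketch (illustrative; the id "wheel" and the MVG path are
% assumptions, and error handling is elided).  DrawClipPath() looks the clip
% path up as an image artifact, so register it first:
%
%   (void) SetImageArtifact(image,"wheel",
%     "path 'M 60,60 L 200,60 L 200,200 L 60,200 Z'");
%   if (DrawClipPath(image,draw_info,"wheel") == MagickFalse)
%     (void) CatchImageException(image);
%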
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
const DrawInfo *draw_info,const char *id)
{
const char
*clip_path;
Image
*clipping_mask;
MagickBooleanType
status;
clip_path=GetImageArtifact(image,id);
if (clip_path == (const char *) NULL)
return(MagickFalse);
clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
&image->exception);
if (clipping_mask == (Image *) NULL)
return(MagickFalse);
status=SetImageClipMask(image,clipping_mask);
clipping_mask=DestroyImage(clipping_mask);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
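% A minimal sketch of an internal call (the id and MVG string are
% illustrative only):
%
%   Image
%     *clipping_mask;
%   clipping_mask=DrawClippingMask(image,draw_info,"wheel",
%     "path 'M 0,0 L 100,0 L 100,100 Z'",&image->exception);
%   if (clipping_mask != (Image *) NULL)
%     {
%       (void) SetImageClipMask(image,clipping_mask);
%       clipping_mask=DestroyImage(clipping_mask);
%     }
%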
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *clip_path,ExceptionInfo *exception)
{
DrawInfo
*clone_info;
Image
*clip_mask;
MagickStatusType
status;
/*
Draw a clip path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
clip_mask=AcquireImage((const ImageInfo *) NULL);
status=SetImageExtent(clip_mask,image->columns,image->rows);
if (status == MagickFalse)
return(DestroyImage(clip_mask));
status=SetImageClipMask(image,(Image *) NULL);
status=QueryColorCompliance("#0000",AllCompliance,
&clip_mask->background_color,exception);
clip_mask->background_color.opacity=(Quantum) TransparentOpacity;
status=SetImageBackgroundColor(clip_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,clip_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
if (clone_info->clip_mask != (char *) NULL)
clone_info->clip_mask=DestroyString(clone_info->clip_mask);
(void) QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->opacity=OpaqueOpacity;
clone_info->clip_path=MagickTrue;
status=RenderMVGContent(clip_mask,clone_info,0);
clone_info=DestroyDrawInfo(clone_info);
status&=SeparateImageChannel(clip_mask,TrueAlphaChannel);
if (draw_info->compliance != SVGCompliance)
status&=NegateImage(clip_mask,MagickFalse);
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
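% A minimal sketch of an internal call (the id and MVG string are
% illustrative only):
%
%   Image
%     *composite_mask;
%   composite_mask=DrawCompositeMask(image,draw_info,"mask-1",
%     "rectangle 10,10 90,90",&image->exception);
%   if (composite_mask != (Image *) NULL)
%     {
%       (void) SetImageMask(image,composite_mask);
%       composite_mask=DestroyImage(composite_mask);
%     }
%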
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *mask_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;
  Image
    *composite_mask;
  MagickStatusType
    status;
/*
Draw a mask path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
composite_mask=AcquireImage((const ImageInfo *) NULL);
status=SetImageExtent(composite_mask,image->columns,image->rows);
if (status == MagickFalse)
return(DestroyImage(composite_mask));
status=SetImageMask(image,(Image *) NULL);
status=QueryColorCompliance("#0000",AllCompliance,
&composite_mask->background_color,exception);
composite_mask->background_color.opacity=(Quantum) TransparentOpacity;
(void) SetImageBackgroundColor(composite_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,mask_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->opacity=OpaqueOpacity;
status=RenderMVGContent(composite_mask,clone_info,0);
clone_info=DestroyDrawInfo(clone_info);
status&=SeparateImageChannel(composite_mask,TrueAlphaChannel);
status&=NegateImage(composite_mask,MagickFalse);
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
%
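% Callers normally reach this routine indirectly; for example, the MVG
% fragment below (illustrative) yields a dashed line by populating
% draw_info->dash_pattern and draw_info->dash_offset:
%
%   push graphic-context
%     stroke black
%     stroke-width 2
%     stroke-dasharray 5,3
%     stroke-dashoffset 2
%     line 10,10 200,10
%   pop graphic-context
%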
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,Image *image)
{
double
length,
maximum_length,
offset,
scale,
total_length;
DrawInfo
*clone_info;
MagickStatusType
status;
PrimitiveInfo
*dash_polygon;
register double
dx,
dy;
register ssize_t
i;
size_t
number_vertices;
ssize_t
j,
n;
assert(draw_info != (const DrawInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
number_vertices=(size_t) i;
dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(2UL*number_vertices+32UL),sizeof(*dash_polygon));
if (dash_polygon == (PrimitiveInfo *) NULL)
return(MagickFalse);
(void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
sizeof(*dash_polygon));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->miterlimit=0;
dash_polygon[0]=primitive_info[0];
scale=ExpandAffine(&draw_info->affine);
length=scale*draw_info->dash_pattern[0];
offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
scale*draw_info->dash_offset : 0.0;
j=1;
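  /*
    Consume the dash offset: skip whole pattern entries until the remaining
    offset falls inside one, then shorten that entry by the remainder.
  */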
for (n=0; offset > 0.0; j=0)
{
if (draw_info->dash_pattern[n] <= 0.0)
break;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
if (offset > length)
{
offset-=length;
n++;
length=scale*draw_info->dash_pattern[n];
continue;
}
if (offset < length)
{
length-=offset;
offset=0.0;
break;
}
offset=0.0;
n++;
}
status=MagickTrue;
maximum_length=0.0;
total_length=0.0;
for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
{
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (double) (MaxBezierCoordinates >> 2))
break;
if (fabs(length) < MagickEpsilon)
{
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
    for (total_length=0.0; (length >= 0.0) &&
         (maximum_length >= (total_length+length)); )
{
total_length+=length;
if ((n & 0x01) != 0)
{
dash_polygon[0]=primitive_info[0];
dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
j=1;
}
else
{
if ((j+1) > (ssize_t) number_vertices)
break;
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon);
if (status == MagickFalse)
break;
}
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
length-=(maximum_length-total_length);
if ((n & 0x01) != 0)
continue;
dash_polygon[j]=primitive_info[i];
dash_polygon[j].coordinates=1;
j++;
}
if ((status != MagickFalse) && (total_length < maximum_length) &&
((n & 0x01) == 0) && (j > 1))
{
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x+=MagickEpsilon;
dash_polygon[j].point.y+=MagickEpsilon;
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon);
}
dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear or radial gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
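% For example (illustrative MVG), the gradient below is stored under the id
% 'g1' during parsing and eventually rendered by this routine when the
% rectangle is filled:
%
%   push gradient 'g1' linear 0,0 0,128
%     stop-color '#ffffff' 0
%     stop-color '#000000' 1
%   pop gradient
%   fill url(#g1)
%   rectangle 0,0 128,128
%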
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
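      /*
        Scalar projection of the point onto the gradient vector:
        offset=(p.q)/|p|, where p spans the gradient vector and q runs from
        its origin to (x,y); DrawGradientImage() later multiplies by 1/|p|
        to normalize the offset to [0,1].
      */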
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
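      /*
        Rotate (x,y) into the gradient frame and scale by the ellipse radii;
        the result is the normalized radial distance from the center.
      */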
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
const DrawInfo *draw_info)
{
CacheView
*image_view;
const GradientInfo
*gradient;
const SegmentInfo
*gradient_vector;
double
length;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickPixelPacket
zero;
PointInfo
point;
RectangleInfo
bounding_box;
ssize_t
y;
/*
Draw linear or radial gradient on image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
gradient=(&draw_info->gradient);
gradient_vector=(&gradient->gradient_vector);
point.x=gradient_vector->x2-gradient_vector->x1;
point.y=gradient_vector->y2-gradient_vector->y1;
length=sqrt(point.x*point.x+point.y*point.y);
bounding_box=gradient->bounding_box;
status=MagickTrue;
exception=(&image->exception);
GetMagickPixelPacket(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
{
double
alpha,
offset;
MagickPixelPacket
composite,
pixel;
register IndexPacket
*magick_restrict indexes;
register ssize_t
i,
x;
register PixelPacket
*magick_restrict q;
ssize_t
j;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
composite=zero;
offset=GetStopColorOffset(gradient,0,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
switch (gradient->spread)
{
case UndefinedSpread:
case PadSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if ((offset < 0.0) || (i == 0))
composite=gradient->stops[0].color;
else
if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case ReflectSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
if (offset < 0.0)
offset=(-offset);
if ((ssize_t) fmod(offset,2.0) == 0)
offset=fmod(offset,1.0);
else
offset=1.0-fmod(offset,1.0);
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case RepeatSpread:
{
double
repeat;
MagickBooleanType
antialias;
antialias=MagickFalse;
repeat=0.0;
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type == LinearGradient)
{
repeat=fmod(offset,length);
if (repeat < 0.0)
repeat=length-fmod(-repeat,length);
else
repeat=fmod(offset,length);
antialias=(repeat < length) && ((repeat+1.0) > length) ?
MagickTrue : MagickFalse;
offset=PerceptibleReciprocal(length)*repeat;
}
else
{
repeat=fmod(offset,(double) gradient->radius);
if (repeat < 0.0)
repeat=gradient->radius-fmod(-repeat,
(double) gradient->radius);
else
repeat=fmod(offset,(double) gradient->radius);
antialias=repeat+1.0 > gradient->radius ? MagickTrue :
MagickFalse;
offset=repeat/gradient->radius;
}
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
if (antialias != MagickFalse)
{
if (gradient->type == LinearGradient)
alpha=length-repeat;
else
alpha=gradient->radius-repeat;
i=0;
j=(ssize_t) gradient->number_stops-1L;
}
MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
}
MagickPixelCompositeOver(&composite,composite.opacity,&pixel,
pixel.opacity,&pixel);
SetPixelPacket(image,&pixel,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
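% A minimal usage sketch (error handling is elided; image_info is assumed
% to be a previously initialized ImageInfo):
%
%   DrawInfo
%     *draw_info;
%
%   draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
%   (void) CloneString(&draw_info->primitive,"fill red circle 60,60 60,30");
%   if (DrawImage(image,draw_info) == MagickFalse)
%     (void) CatchImageException(image);
%   draw_info=DestroyDrawInfo(draw_info);
%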
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
const size_t pad)
{
double
extent;
size_t
quantum;
  /*
    Check if there is enough storage for drawing primitives.
  */
extent=(double) mvg_info->offset+pad+PrimitiveExtentPad+1;
quantum=sizeof(**mvg_info->primitive_info);
if (((extent*quantum) < (double) SSIZE_MAX) &&
((extent*quantum) < (double) GetMaxMemoryRequest()))
{
if (extent <= (double) *mvg_info->extent)
return(MagickTrue);
*mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
*mvg_info->primitive_info,(size_t) extent,quantum);
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
{
register ssize_t
i;
*mvg_info->extent=(size_t) extent;
for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
(*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
return(MagickTrue);
}
}
/*
Reallocation failed, allocate a primitive to facilitate unwinding.
*/
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
*mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
*mvg_info->primitive_info);
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
*mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
PrimitiveExtentPad*quantum);
(void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
*mvg_info->extent=1;
return(MagickFalse);
}
static inline double GetDrawValue(const char *magick_restrict string,
  char **magick_restrict sentinal)
{
  double
    value;
  value=InterpretLocaleValue(string,sentinal);
  if ((IsNaN(value) != 0) || (value < -((double) SSIZE_MAX-512.0)) ||
      (value > ((double) SSIZE_MAX-512.0)))
    return(0.0);
  return(value);
}
static int MVGMacroCompare(const void *target,const void *source)
{
const char
*p,
*q;
p=(const char *) target;
q=(const char *) source;
return(strcmp(p,q));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
char
*macro,
*token;
const char
*q;
size_t
extent;
SplayTreeInfo
*macros;
/*
Scan graphic primitives for definitions and classes.
*/
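  /*
    e.g. (illustrative) the body between a named push/pop pair is stored in
    the splay-tree under its name:

      push graphic-context "wheel"
        fill red circle 60,60 60,30
      pop graphic-context
  */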
if (primitive == (const char *) NULL)
return((SplayTreeInfo *) NULL);
macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
RelinquishMagickMemory);
macro=AcquireString(primitive);
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
for (q=primitive; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare("push",token) == 0)
{
register const char
*end,
*start;
(void) GetNextToken(q,&q,extent,token);
if (*q == '"')
{
char
name[MagickPathExtent];
const char
*p;
ssize_t
n;
/*
Named macro (e.g. push graphic-context "wheel").
*/
(void) GetNextToken(q,&q,extent,token);
start=q;
end=q;
(void) CopyMagickString(name,token,MagickPathExtent);
n=1;
for (p=q; *p != '\0'; )
{
if (GetNextToken(p,&p,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare(token,"pop") == 0)
{
end=p-strlen(token)-1;
n--;
}
if (LocaleCompare(token,"push") == 0)
n++;
if ((n == 0) && (end > start))
{
/*
Extract macro.
*/
(void) GetNextToken(p,&p,extent,token);
(void) CopyMagickString(macro,start,(size_t) (end-start));
(void) AddValueToSplayTree(macros,ConstantString(name),
ConstantString(macro));
break;
}
}
}
}
}
token=DestroyString(token);
macro=DestroyString(macro);
return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
char
*p;
double
value;
value=GetDrawValue(point,&p);
return((fabs(value) < MagickEpsilon) && (p == point) ? MagickFalse :
MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
const PointInfo point)
{
primitive_info->coordinates=1;
primitive_info->closed_subpath=MagickFalse;
primitive_info->point=point;
return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
key[2*MaxTextExtent],
keyword[MaxTextExtent],
geometry[MaxTextExtent],
name[MaxTextExtent],
*next_token,
pattern[MaxTextExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PixelPacket
start_color;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryImageException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if (image->matte == MagickFalse)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel);
if (status == MagickFalse)
return(MagickFalse);
}
primitive=(char *) NULL;
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,&image->exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
  if (primitive_info == (PrimitiveInfo *) NULL)
    {
      /*
        No DrawInfo has been cloned into graphic_context yet, so release
        only the containers themselves.
      */
      primitive=DestroyString(primitive);
      graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
      ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=(&image->exception);
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MaxTextExtent;
cursor=0.0;
defsDepth=0;
symbolDepth=0;
macros=GetMVGMacros(primitive);
status=QueryColorDatabase("#000000",&start_color,&image->exception);
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MaxTextExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&graphic_context[n]->border_color,
&image->exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if ((mvg_class != (const char *) NULL) && (p > primitive))
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,&image->exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
                GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
if (LocaleCompare("currentColor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern);
else
{
status&=QueryColorDatabase(token,&graphic_context[n]->fill,
&image->exception);
if (graphic_context[n]->fill_opacity != OpaqueOpacity)
graphic_context[n]->fill.opacity=ClampToQuantum(
graphic_context[n]->fill_opacity);
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_opacity*=(1.0-opacity);
else
graphic_context[n]->fill_opacity=(QuantumRange-
graphic_context[n]->fill_opacity)*(1.0-opacity);
if (graphic_context[n]->fill.opacity != TransparentOpacity)
graphic_context[n]->fill.opacity=(Quantum)
graphic_context[n]->fill_opacity;
else
graphic_context[n]->fill.opacity=ClampToQuantum(QuantumRange*
opacity);
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics);
graphic_context[n]->kerning=metrics.width*
GetDrawValue(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,&image->exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,graphic_context[n]->composite_mask);
}
break;
}
if (LocaleCompare("matte",keyword) == 0)
{
primitive_type=MattePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_opacity*=(1.0-opacity);
graphic_context[n]->stroke_opacity*=(1.0-opacity);
}
else
{
graphic_context[n]->fill_opacity=(QuantumRange-
graphic_context[n]->fill_opacity)*(1.0-opacity);
graphic_context[n]->stroke_opacity=(QuantumRange-
graphic_context[n]->stroke_opacity)*(1.0-opacity);
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),DrawError,
"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageClipMask(image,(Image *) NULL);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MaxTextExtent],
name[MaxTextExtent],
type[MaxTextExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MaxTextExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MaxTextExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MaxTextExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
GradientType
type;
PixelPacket
stop_color;
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&stop_color,&image->exception);
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,&start_color,&stop_color);
start_color=stop_color;
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern);
else
{
status&=QueryColorDatabase(token,&graphic_context[n]->stroke,
&image->exception);
if (graphic_context[n]->stroke_opacity != OpaqueOpacity)
graphic_context[n]->stroke.opacity=ClampToQuantum(
graphic_context[n]->stroke_opacity);
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*p;
p=q;
(void) GetNextToken(p,&p,extent,token);
if (*token == ',')
(void) GetNextToken(p,&p,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(p,&p,extent,token);
if (*token == ',')
(void) GetNextToken(p,&p,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
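                /*
                  As in SVG, an odd-length dash list is duplicated to yield
                  an even count (e.g. "5,3,2" behaves as "5,3,2,5,3,2").
                */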
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_opacity*=(1.0-opacity);
else
graphic_context[n]->stroke_opacity=(QuantumRange-
graphic_context[n]->stroke_opacity)*(1.0-opacity);
if (graphic_context[n]->stroke.opacity != TransparentOpacity)
graphic_context[n]->stroke.opacity=(Quantum)
graphic_context[n]->stroke_opacity;
else
graphic_context[n]->stroke.opacity=ClampToQuantum(QuantumRange*
opacity);
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&graphic_context[n]->undercolor,
&image->exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(GetDrawValue(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(GetDrawValue(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
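    /*
      Concatenate any affine tokens parsed above onto the current transform;
      primitive coordinates pass through the token affine first, then the
      pre-existing transform.
    */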
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
      Compute the bounding box of the primitive; rounded primitives use it
      below to circumscribe themselves within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(BezierQuantum*(double) primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (108.0*BezierQuantum))
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
DrawError,"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=GetDrawValue(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
double
dx,
dy,
maximum_length;
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (MaxBezierCoordinates/100.0))
ThrowPointExpectedException(image,keyword);
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(image,&mvg_info,token);
if (coordinates < 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case ColorPrimitive:
case MattePrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (status == 0)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
/*
Sanity check.
*/
status&=CheckPrimitiveExtent(&mvg_info,(size_t)
ExpandAffine(&graphic_context[n]->affine));
if (status == 0)
break;
status&=CheckPrimitiveExtent(&mvg_info,(size_t)
graphic_context[n]->stroke_width);
if (status == 0)
break;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryImageException(DrawError,
"NonconformingDrawingPrimitiveDefinition",keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
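/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on the image as directed by the MVG
% content in draw_info->primitive; it is a thin wrapper around
% RenderMVGContent() at recursion depth 0.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
*/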
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
{
return(RenderMVGContent(image,draw_info,0));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() renders the MVG path registered under the given pattern
% name (as image artifacts) into the pattern image.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the pattern image.
%
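% A hedged usage sketch; it assumes the pattern "hatch" and its geometry
% were registered beforehand as the image artifacts "hatch" and
% "hatch-geometry":
%
%    Image
%      *pattern = (Image *) NULL;
%
%    status=DrawPatternPath(image,draw_info,"hatch",&pattern);
%
% On success *pattern holds the rendered tile; release it with
% DestroyImage() when done.
%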
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
const DrawInfo *draw_info,const char *name,Image **pattern)
{
char
property[MaxTextExtent];
const char
*geometry,
*path,
*type;
DrawInfo
*clone_info;
ImageInfo
*image_info;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
assert(name != (const char *) NULL);
(void) FormatLocaleString(property,MaxTextExtent,"%s",name);
path=GetImageArtifact(image,property);
if (path == (const char *) NULL)
return(MagickFalse);
(void) FormatLocaleString(property,MaxTextExtent,"%s-geometry",name);
geometry=GetImageArtifact(image,property);
if (geometry == (const char *) NULL)
return(MagickFalse);
if ((*pattern) != (Image *) NULL)
*pattern=DestroyImage(*pattern);
image_info=AcquireImageInfo();
image_info->size=AcquireString(geometry);
*pattern=AcquireImage(image_info);
image_info=DestroyImageInfo(image_info);
(void) QueryColorDatabase("#00000000",&(*pattern)->background_color,
&image->exception);
(void) SetImageBackgroundColor(*pattern);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"begin pattern-path %s %s",name,geometry);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
(void) FormatLocaleString(property,MaxTextExtent,"%s-type",name);
type=GetImageArtifact(image,property);
if (type != (const char *) NULL)
clone_info->gradient.type=(GradientType) ParseCommandOption(
MagickGradientOptions,MagickFalse,type);
(void) CloneString(&clone_info->primitive,path);
status=RenderMVGContent(*pattern,clone_info,0);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
register ssize_t
i;
assert(polygon_info != (PolygonInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (polygon_info[i] != (PolygonInfo *) NULL)
polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(draw_info,primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
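  /*
    Give each potential worker thread a private polygon/edge list: scanline
    rendering mutates per-edge state (scanline, highwater), so the lists
    cannot be shared without locking.
  */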
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static double GetOpacityPixel(PolygonInfo *polygon_info,const double mid,
const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
const ssize_t y,double *stroke_opacity)
{
double
alpha,
beta,
distance,
subpath_opacity;
PointInfo
delta;
register EdgeInfo
*p;
register const PointInfo
*q;
register ssize_t
i;
ssize_t
j,
winding_number;
/*
Compute fill & stroke opacity for this (x,y) point.
*/
*stroke_opacity=0.0;
subpath_opacity=0.0;
p=polygon_info->edges;
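  /*
    Edges arrive sorted on their topmost y: once an edge begins below this
    scanline we can stop, and edges wholly above it are destroyed so later
    scanlines skip them.
  */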
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= (p->bounds.y1-mid-0.5))
break;
if ((double) y > (p->bounds.y2+mid+0.5))
{
(void) DestroyEdge(polygon_info,(size_t) j);
continue;
}
if (((double) x <= (p->bounds.x1-mid-0.5)) ||
((double) x > (p->bounds.x2+mid+0.5)))
continue;
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) p->number_points; i++)
{
if ((double) y <= (p->points[i-1].y-mid-0.5))
break;
if ((double) y > (p->points[i].y+mid+0.5))
continue;
if (p->scanline != (double) y)
{
p->scanline=(double) y;
p->highwater=(size_t) i;
}
/*
Compute distance between a point and an edge.
*/
q=p->points+i-1;
delta.x=(q+1)->x-q->x;
delta.y=(q+1)->y-q->y;
beta=delta.x*(x-q->x)+delta.y*(y-q->y);
if (beta <= 0.0)
{
delta.x=(double) x-q->x;
delta.y=(double) y-q->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=delta.x*delta.x+delta.y*delta.y;
if (beta >= alpha)
{
delta.x=(double) x-(q+1)->x;
delta.y=(double) y-(q+1)->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=PerceptibleReciprocal(alpha);
beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon;
distance=alpha*beta*beta;
}
}
/*
Compute stroke & subpath opacity.
*/
beta=0.0;
if (p->ghostline == MagickFalse)
{
alpha=mid+0.5;
if ((*stroke_opacity < 1.0) &&
(distance <= ((alpha+0.25)*(alpha+0.25))))
{
alpha=mid-0.5;
if (distance <= ((alpha+0.25)*(alpha+0.25)))
*stroke_opacity=1.0;
else
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt((double) distance);
alpha=beta-mid-0.5;
if (*stroke_opacity < ((alpha-0.25)*(alpha-0.25)))
*stroke_opacity=(alpha-0.25)*(alpha-0.25);
}
}
}
if ((fill == MagickFalse) || (distance > 1.0) || (subpath_opacity >= 1.0))
continue;
if (distance <= 0.0)
{
subpath_opacity=1.0;
continue;
}
if (distance > 1.0)
continue;
if (fabs(beta) < MagickEpsilon)
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt(distance);
}
alpha=beta-1.0;
if (subpath_opacity < (alpha*alpha))
subpath_opacity=alpha*alpha;
}
}
/*
Compute fill opacity.
*/
if (fill == MagickFalse)
return(0.0);
if (subpath_opacity >= 1.0)
return(1.0);
/*
Determine winding number.
*/
winding_number=0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= p->bounds.y1)
break;
if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
continue;
if ((double) x > p->bounds.x2)
{
winding_number+=p->direction ? 1 : -1;
continue;
}
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) (p->number_points-1); i++)
if ((double) y <= p->points[i].y)
break;
q=p->points+i-1;
if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
winding_number+=p->direction ? 1 : -1;
}
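  /*
    Apply the fill rule: even-odd fills where the winding number is odd;
    nonzero fills wherever it differs from zero.
  */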
if (fill_rule != NonZeroRule)
{
if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
return(1.0);
}
else
if (MagickAbsoluteValue(winding_number) != 0)
return(1.0);
return(subpath_opacity);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
CacheView
*image_view;
const char
*artifact;
double
mid;
ExceptionInfo
*exception;
MagickBooleanType
fill,
status;
PolygonInfo
**magick_restrict polygon_info;
register EdgeInfo
*p;
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
start_y,
stop_y,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
assert(primitive_info != (PrimitiveInfo *) NULL);
if (primitive_info->coordinates <= 1)
return(MagickTrue);
/*
Compute bounding box.
*/
polygon_info=AcquirePolygonThreadSet(draw_info,primitive_info);
if (polygon_info == (PolygonInfo **) NULL)
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
fill=(primitive_info->method == FillToBorderMethod) ||
(primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
bounds=polygon_info[0]->edges[0].bounds;
artifact=GetImageArtifact(image,"draw:render-bounding-rectangles");
if (IsStringTrue(artifact) != MagickFalse)
(void) DrawBoundingRectangles(image,draw_info,polygon_info[0]);
for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
{
p=polygon_info[0]->edges+i;
if (p->bounds.x1 < bounds.x1)
bounds.x1=p->bounds.x1;
if (p->bounds.y1 < bounds.y1)
bounds.y1=p->bounds.y1;
if (p->bounds.x2 > bounds.x2)
bounds.x2=p->bounds.x2;
if (p->bounds.y2 > bounds.y2)
bounds.y2=p->bounds.y2;
}
bounds.x1-=(mid+1.0);
bounds.y1-=(mid+1.0);
bounds.x2+=(mid+1.0);
bounds.y2+=(mid+1.0);
if ((bounds.x1 >= (double) image->columns) ||
(bounds.y1 >= (double) image->rows) ||
(bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(MagickTrue); /* virtual polygon */
}
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x1;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y1;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x2;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y2;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
if ((primitive_info->coordinates == 1) ||
(polygon_info[0]->number_edges == 0))
{
/*
Draw point.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
MagickBooleanType
sync;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
x=start_x;
q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for ( ; x <= stop_x; x++)
{
if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
(y == (ssize_t) ceil(primitive_info->point.y-0.5)))
(void) GetFillColor(draw_info,x-start_x,y-start_y,q);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-polygon");
return(status);
}
/*
Draw polygon or line.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
const int
id = GetOpenMPThreadId();
double
fill_opacity,
stroke_opacity;
PixelPacket
fill_color,
stroke_color;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
1),1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=start_x; x <= stop_x; x++)
{
/*
Fill and/or stroke.
*/
fill_opacity=GetOpacityPixel(polygon_info[id],mid,fill,
draw_info->fill_rule,x,y,&stroke_opacity);
if (draw_info->stroke_antialias == MagickFalse)
{
fill_opacity=fill_opacity > 0.5 ? 1.0 : 0.0;
stroke_opacity=stroke_opacity > 0.5 ? 1.0 : 0.0;
}
(void) GetFillColor(draw_info,x-start_x,y-start_y,&fill_color);
fill_opacity=(double) (QuantumRange-fill_opacity*(QuantumRange-
fill_color.opacity));
MagickCompositeOver(&fill_color,(MagickRealType) fill_opacity,q,
(MagickRealType) q->opacity,q);
(void) GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color);
stroke_opacity=(double) (QuantumRange-stroke_opacity*(QuantumRange-
stroke_color.opacity));
MagickCompositeOver(&stroke_color,(MagickRealType) stroke_opacity,q,
(MagickRealType) q->opacity,q);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
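% A hedged usage sketch for a single point; the initialization mirrors what
% the MVG parser above produces (an UndefinedPrimitive entry terminates the
% array):
%
%    PrimitiveInfo
%      primitive_info[2];
%
%    (void) memset(primitive_info,0,sizeof(primitive_info));
%    primitive_info[0].primitive=PointPrimitive;
%    primitive_info[0].coordinates=1;
%    primitive_info[0].point.x=10.0;
%    primitive_info[0].point.y=10.0;
%    primitive_info[0].method=FloodfillMethod;
%    primitive_info[1].primitive=UndefinedPrimitive;
%    status=DrawPrimitive(image,draw_info,primitive_info);
%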
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
const char
*methods[] =
{
"point",
"replace",
"floodfill",
"filltoborder",
"reset",
"?"
};
PointInfo
p,
q,
point;
register ssize_t
i,
x;
ssize_t
coordinates,
y;
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
switch (primitive_info->primitive)
{
case PointPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ColorPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case MattePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"MattePrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case TextPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"TextPrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
case ImagePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ImagePrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
default:
break;
}
coordinates=0;
p=primitive_info[0].point;
q.x=(-1.0);
q.y=(-1.0);
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin open (%.20g)",(double) coordinates);
p=point;
}
point=primitive_info[i].point;
if ((fabs(q.x-point.x) >= MagickEpsilon) ||
(fabs(q.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
q=point;
coordinates--;
if (coordinates > 0)
continue;
if ((fabs(p.x-point.x) >= MagickEpsilon) ||
(fabs(p.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
(double) coordinates);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
(double) coordinates);
}
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickStatusType
status;
register ssize_t
i,
x;
ssize_t
y;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-primitive");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
draw_info->affine.tx,draw_info->affine.ty);
}
exception=(&image->exception);
status=MagickTrue;
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsPixelGray(&draw_info->fill) == MagickFalse) ||
(IsPixelGray(&draw_info->stroke) == MagickFalse)))
status=SetImageColorspace(image,sRGBColorspace);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageClipMask(image,draw_info->clipping_mask);
status&=SetImageMask(image,draw_info->composite_mask);
}
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
image_view=AcquireAuthenticCacheView(image,exception);
switch (primitive_info->primitive)
{
case ColorPrimitive:
{
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelPacket
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (PixelPacket *) NULL)
break;
(void) GetFillColor(draw_info,x,y,q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelPacket
target;
status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsColorSimilar(image,q,&target) == MagickFalse)
{
q++;
continue;
}
(void) GetFillColor(draw_info,x,y,q);
q++;
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
MagickPixelPacket
target;
status&=GetOneVirtualMagickPixel(image,x,y,&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(MagickRealType) draw_info->border_color.red;
target.green=(MagickRealType) draw_info->border_color.green;
target.blue=(MagickRealType) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,DefaultChannels,draw_info,&target,x,
y,primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue);
break;
}
case ResetMethod:
{
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) GetFillColor(draw_info,x,y,q);
q++;
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case MattePrimitive:
{
if (image->matte == MagickFalse)
status&=SetImageAlphaChannel(image,OpaqueAlphaChannel);
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelPacket
pixel;
PixelPacket
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (PixelPacket *) NULL)
break;
(void) GetFillColor(draw_info,x,y,&pixel);
SetPixelOpacity(q,pixel.opacity);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelPacket
pixel,
target;
status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsColorSimilar(image,q,&target) == MagickFalse)
{
q++;
continue;
}
(void) GetFillColor(draw_info,x,y,&pixel);
SetPixelOpacity(q,pixel.opacity);
q++;
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
MagickPixelPacket
target;
status&=GetOneVirtualMagickPixel(image,x,y,&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(MagickRealType) draw_info->border_color.red;
target.green=(MagickRealType) draw_info->border_color.green;
target.blue=(MagickRealType) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,OpacityChannel,draw_info,&target,x,
y,primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue);
break;
}
case ResetMethod:
{
PixelPacket
pixel;
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) GetFillColor(draw_info,x,y,&pixel);
SetPixelOpacity(q,pixel.opacity);
q++;
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case ImagePrimitive:
{
AffineMatrix
affine;
char
composite_geometry[MaxTextExtent];
Image
*composite_image,
*composite_images;
ImageInfo
*clone_info;
RectangleInfo
geometry;
ssize_t
x1,
y1;
if (primitive_info->text == (char *) NULL)
break;
clone_info=AcquireImageInfo();
composite_images=(Image *) NULL;
if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
composite_images=ReadInlineImage(clone_info,primitive_info->text,
&image->exception);
else
if (*primitive_info->text != '\0')
{
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
status&=SetImageInfo(clone_info,0,exception);
if (clone_info->size != (char *) NULL)
clone_info->size=DestroyString(clone_info->size);
if (clone_info->extract != (char *) NULL)
clone_info->extract=DestroyString(clone_info->extract);
if ((LocaleNCompare(clone_info->magick,"http",4) == 0) ||
(LocaleCompare(clone_info->magick,"mpri") == 0))
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
composite_images=ReadImage(clone_info,exception);
}
clone_info=DestroyImageInfo(clone_info);
if (composite_images == (Image *) NULL)
{
status=0;
break;
}
composite_image=RemoveFirstImageFromList(&composite_images);
composite_images=DestroyImageList(composite_images);
(void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
NULL,(void *) NULL);
x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
{
char
geometry[MaxTextExtent];
/*
Resize image.
*/
(void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g!",
primitive_info[1].point.x,primitive_info[1].point.y);
composite_image->filter=image->filter;
status&=TransformImage(&composite_image,(char *) NULL,geometry);
}
if (composite_image->matte == MagickFalse)
status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel);
if (draw_info->opacity != OpaqueOpacity)
status&=SetImageOpacity(composite_image,draw_info->opacity);
SetGeometry(image,&geometry);
image->gravity=draw_info->gravity;
geometry.x=x;
geometry.y=y;
(void) FormatLocaleString(composite_geometry,MaxTextExtent,
"%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
composite_image->rows,(double) geometry.x,(double) geometry.y);
(void) ParseGravityGeometry(image,composite_geometry,&geometry,
&image->exception);
affine=draw_info->affine;
affine.tx=(double) geometry.x;
affine.ty=(double) geometry.y;
composite_image->interpolate=image->interpolate;
if ((draw_info->compose == OverCompositeOp) ||
(draw_info->compose == SrcOverCompositeOp))
status&=DrawAffineImage(image,composite_image,&affine);
else
status&=CompositeImage(image,draw_info->compose,composite_image,
geometry.x,geometry.y);
composite_image=DestroyImage(composite_image);
break;
}
case PointPrimitive:
{
PixelPacket
fill_color;
PixelPacket
*q;
if ((y < 0) || (y >= (ssize_t) image->rows))
break;
if ((x < 0) || (x >= (ssize_t) image->columns))
break;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (PixelPacket *) NULL)
break;
(void) GetFillColor(draw_info,x,y,&fill_color);
MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
(MagickRealType) q->opacity,q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case TextPrimitive:
{
char
geometry[MaxTextExtent];
DrawInfo
*clone_info;
if (primitive_info->text == (char *) NULL)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->text,primitive_info->text);
(void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
(void) CloneString(&clone_info->geometry,geometry);
status&=AnnotateImage(image,clone_info);
clone_info=DestroyDrawInfo(clone_info);
break;
}
default:
{
double
mid,
scale;
DrawInfo
*clone_info;
if (IsEventLogging() != MagickFalse)
LogPrimitiveInfo(primitive_info);
scale=ExpandAffine(&draw_info->affine);
if ((draw_info->dash_pattern != (double *) NULL) &&
(fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
(fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
(draw_info->stroke.opacity != (Quantum) TransparentOpacity))
{
/*
Draw dash polygon.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.opacity=(Quantum) TransparentOpacity;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawDashPolygon(draw_info,primitive_info,image);
break;
}
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
if ((mid > 1.0) &&
((draw_info->stroke.opacity != (Quantum) TransparentOpacity) ||
(draw_info->stroke_pattern != (Image *) NULL)))
{
double
x,
y;
MagickBooleanType
closed_path;
/*
Draw strokes while respecting line cap/join attributes.
*/
closed_path=primitive_info[0].closed_subpath;
i=(ssize_t) primitive_info[0].coordinates;
x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
closed_path=MagickTrue;
if ((((draw_info->linecap == RoundCap) ||
(closed_path != MagickFalse)) &&
(draw_info->linejoin == RoundJoin)) ||
(primitive_info[i].primitive != UndefinedPrimitive))
{
status&=DrawPolygonPrimitive(image,draw_info,primitive_info);
break;
}
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.opacity=(Quantum) TransparentOpacity;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawStrokePolygon(image,draw_info,primitive_info);
break;
}
status&=DrawPolygonPrimitive(image,draw_info,primitive_info);
break;
}
}
image_view=DestroyCacheView(image_view);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageClipMask(image,(Image *) NULL);
status&=SetImageMask(image,(Image *) NULL);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
PrimitiveInfo
linecap[5];
register ssize_t
i;
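  /*
    Build a degenerate four-point polygon, a couple of epsilons wide, at the
    cap position; stroking it paints a disc of the stroke width there.
  */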
for (i=0; i < 4; i++)
linecap[i]=(*primitive_info);
linecap[0].coordinates=4;
linecap[1].point.x+=2.0*MagickEpsilon;
linecap[2].point.x+=2.0*MagickEpsilon;
linecap[2].point.y+=2.0*MagickEpsilon;
linecap[3].point.y+=2.0*MagickEpsilon;
linecap[4].primitive=UndefinedPrimitive;
return(DrawPolygonPrimitive(image,draw_info,linecap));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
DrawInfo
*clone_info;
MagickBooleanType
closed_path;
MagickStatusType
status;
PrimitiveInfo
*stroke_polygon;
register const PrimitiveInfo
*p,
*q;
/*
Draw stroked polygon.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-stroke-polygon");
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill=draw_info->stroke;
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
MagickTrue,&clone_info->stroke_pattern->exception);
clone_info->stroke.opacity=(Quantum) TransparentOpacity;
clone_info->stroke_width=0.0;
clone_info->fill_rule=NonZeroRule;
status=MagickTrue;
for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
{
if (p->coordinates == 1)
continue;
stroke_polygon=TraceStrokePolygon(draw_info,p);
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
status=0;
break;
}
status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon);
stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
if (status == 0)
break;
q=p+p->coordinates-1;
closed_path=p->closed_subpath;
if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
{
status&=DrawRoundLinecap(image,draw_info,p);
status&=DrawRoundLinecap(image,draw_info,q);
}
}
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-stroke-polygon");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
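% A hedged usage sketch:
%
%    AffineMatrix
%      affine;
%
%    GetAffineMatrix(&affine);
%
% On return affine.sx and affine.sy are 1.0 and the remaining terms are 0.0.
%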
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(affine_matrix != (AffineMatrix *) NULL);
(void) memset(affine_matrix,0,sizeof(*affine_matrix));
affine_matrix->sx=1.0;
affine_matrix->sy=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
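% A hedged usage sketch:
%
%    DrawInfo
%      draw_info;
%
%    GetDrawInfo(image_info,&draw_info);
%
% GetDrawInfo() allocates string members (font, density, and so on), so
% heap-resident structures are normally obtained and released with
% CloneDrawInfo()/DestroyDrawInfo() instead.
%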
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
char
*next_token;
const char
*option;
ExceptionInfo
*exception;
ImageInfo
*clone_info;
/*
Initialize draw attributes.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info != (DrawInfo *) NULL);
(void) memset(draw_info,0,sizeof(*draw_info));
clone_info=CloneImageInfo(image_info);
GetAffineMatrix(&draw_info->affine);
exception=AcquireExceptionInfo();
(void) QueryColorDatabase("#000F",&draw_info->fill,exception);
(void) QueryColorDatabase("#FFF0",&draw_info->stroke,exception);
draw_info->stroke_antialias=clone_info->antialias;
draw_info->stroke_width=1.0;
draw_info->fill_rule=EvenOddRule;
draw_info->opacity=OpaqueOpacity;
draw_info->fill_opacity=OpaqueOpacity;
draw_info->stroke_opacity=OpaqueOpacity;
draw_info->linecap=ButtCap;
draw_info->linejoin=MiterJoin;
draw_info->miterlimit=10;
draw_info->decorate=NoDecoration;
if (clone_info->font != (char *) NULL)
draw_info->font=AcquireString(clone_info->font);
if (clone_info->density != (char *) NULL)
draw_info->density=AcquireString(clone_info->density);
draw_info->text_antialias=clone_info->antialias;
draw_info->pointsize=12.0;
if (fabs(clone_info->pointsize) >= MagickEpsilon)
draw_info->pointsize=clone_info->pointsize;
draw_info->undercolor.opacity=(Quantum) TransparentOpacity;
draw_info->border_color=clone_info->border_color;
draw_info->compose=OverCompositeOp;
if (clone_info->server_name != (char *) NULL)
draw_info->server_name=AcquireString(clone_info->server_name);
draw_info->render=MagickTrue;
draw_info->clip_path=MagickFalse;
draw_info->debug=IsEventLogging();
option=GetImageOption(clone_info,"direction");
if (option != (const char *) NULL)
draw_info->direction=(DirectionType) ParseCommandOption(
MagickDirectionOptions,MagickFalse,option);
else
draw_info->direction=UndefinedDirection;
option=GetImageOption(clone_info,"encoding");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->encoding,option);
option=GetImageOption(clone_info,"family");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->family,option);
option=GetImageOption(clone_info,"fill");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&draw_info->fill,exception);
option=GetImageOption(clone_info,"gravity");
if (option != (const char *) NULL)
draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"interline-spacing");
if (option != (const char *) NULL)
draw_info->interline_spacing=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"interword-spacing");
if (option != (const char *) NULL)
draw_info->interword_spacing=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"kerning");
if (option != (const char *) NULL)
draw_info->kerning=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"stroke");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&draw_info->stroke,exception);
option=GetImageOption(clone_info,"strokewidth");
if (option != (const char *) NULL)
draw_info->stroke_width=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"style");
if (option != (const char *) NULL)
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"undercolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&draw_info->undercolor,exception);
option=GetImageOption(clone_info,"weight");
if (option != (const char *) NULL)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(option);
draw_info->weight=(size_t) weight;
}
exception=DestroyExceptionInfo(exception);
draw_info->signature=MagickCoreSignature;
clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns, despite its name, the binomial coefficient C(n,k),
% computed as n!/(k!*(n-k)!).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n: the number of items to choose from.
%
% o k: the number of items chosen.
%
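% For example, Permutate(4,2) evaluates to 4!/(2!*2!) = 6; TraceBezier()
% below uses these values as Bernstein-basis coefficients.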
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
double
r;
register ssize_t
i;
r=1.0;
for (i=k+1; i <= n; i++)
r*=i;
for (i=1; i <= (n-k); i++)
r/=i;
return(r);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo degrees)
{
PointInfo
center,
radius;
center.x=0.5*(end.x+start.x);
center.y=0.5*(end.y+start.y);
radius.x=fabs(center.x-start.x);
radius.y=fabs(center.y-start.y);
return(TraceEllipse(mvg_info,center,radius,degrees));
}
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo arc,const double angle,
const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
double
alpha,
beta,
delta,
factor,
gamma,
theta;
MagickStatusType
status;
PointInfo
center,
points[3],
radii;
register double
cosine,
sine;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
size_t
arc_segments;
ssize_t
offset;
offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
return(TracePoint(primitive_info,end));
radii.x=fabs(arc.x);
radii.y=fabs(arc.y);
if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
return(TraceLine(primitive_info,start,end));
cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
(radii.y*radii.y);
if (delta < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
if (delta > 1.0)
{
radii.x*=sqrt((double) delta);
radii.y*=sqrt((double) delta);
}
points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
alpha=points[1].x-points[0].x;
beta=points[1].y-points[0].y;
if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
if (factor <= 0.0)
factor=0.0;
else
{
factor=sqrt((double) factor);
if (sweep == large_arc)
factor=(-factor);
}
center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
alpha=atan2(points[0].y-center.y,points[0].x-center.x);
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
if ((theta < 0.0) && (sweep != MagickFalse))
theta+=2.0*MagickPI;
else
if ((theta > 0.0) && (sweep == MagickFalse))
theta-=2.0*MagickPI;
arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
MagickEpsilon))));
p=primitive_info;
status=MagickTrue;
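  /*
    Approximate each arc segment (at most a quarter turn) with one cubic
    Bezier; gamma is the standard (4/3)*tan(sweep/4) control-point scale,
    expressed below through sine identities.
  */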
for (i=0; i < (ssize_t) arc_segments; i++)
{
beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
sin(fmod((double) beta,DegreesToRadians(360.0)));
points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
(p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
points[0].y);
(p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
points[0].y);
(p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
points[1].y);
(p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
points[1].y);
(p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
points[2].y);
(p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
points[2].y);
if (i == (ssize_t) (arc_segments-1))
(p+3)->point=end;
status&=TraceBezier(mvg_info,4);
if (status == 0)
break;
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
p+=p->coordinates;
}
if (status == 0)
return(MagickFalse);
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
const size_t number_coordinates)
{
double
alpha,
*coefficients,
weight;
PointInfo
end,
point,
*points;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i,
j;
size_t
control_points,
quantum;
/*
Allocate coefficients.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=number_coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
for (j=i+1; j < (ssize_t) number_coordinates; j++)
{
alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
}
}
coefficients=(double *) AcquireQuantumMemory(number_coordinates,
sizeof(*coefficients));
quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
sizeof(*points));
if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
{
if (points != (PointInfo *) NULL)
points=(PointInfo *) RelinquishMagickMemory(points);
if (coefficients != (double *) NULL)
coefficients=(double *) RelinquishMagickMemory(coefficients);
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
control_points=quantum*number_coordinates;
if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
/*
Compute bezier points.
*/
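  /*
    The main loop below evaluates the curve in Bernstein form,

      B(t) = sum_{j=0}^{n-1} C(n-1,j)*(1-t)^(n-1-j)*t^j*P[j],

    where coefficients[j] caches the binomial coefficient C(n-1,j).  Rather
    than calling pow() per term, alpha starts at (1-t)^(n-1) and is rescaled
    by t/(1-t) after each term.  A minimal scalar sketch of the same idea
    (hypothetical names, assuming 0 <= t < 1):

      alpha=pow(1.0-t,(double) n-1.0);
      for (x=0.0, j=0; j < n; j++)
      {
        x+=alpha*binomial[j]*control[j];
        alpha*=t/(1.0-t);
      }
  */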
end=primitive_info[number_coordinates-1].point;
for (i=0; i < (ssize_t) number_coordinates; i++)
coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
weight=0.0;
for (i=0; i < (ssize_t) control_points; i++)
{
p=primitive_info;
point.x=0.0;
point.y=0.0;
alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
for (j=0; j < (ssize_t) number_coordinates; j++)
{
point.x+=alpha*coefficients[j]*p->point.x;
point.y+=alpha*coefficients[j]*p->point.y;
alpha*=weight/(1.0-weight);
p++;
}
points[i]=point;
weight+=1.0/control_points;
}
/*
Bezier curves are just short segmented polys.
*/
p=primitive_info;
for (i=0; i < (ssize_t) control_points; i++)
{
if (TracePoint(p,points[i]) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
}
if (TracePoint(p,end) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickTrue);
}
/*
  TraceCircle() traces a circle centered at start whose radius is the
  distance from start to end, as a full 0-360 degree ellipse.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
double
alpha,
beta,
radius;
PointInfo
offset,
degrees;
alpha=end.x-start.x;
beta=end.y-start.y;
radius=hypot((double) alpha,(double) beta);
offset.x=(double) radius;
offset.y=(double) radius;
degrees.x=0.0;
degrees.y=360.0;
return(TraceEllipse(mvg_info,start,offset,degrees));
}
/*
  TraceEllipse() approximates an elliptical arc, from arc.x to arc.y degrees,
  with short line segments.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
double
coordinates,
delta,
step,
x,
y;
PointInfo
angle,
point;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
/*
Ellipses are just short segmented polys.
*/
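  /*
    Vertices are sampled from the parametric form

      x(t)=center.x+radii.x*cos(t),  y(t)=center.y+radii.y*sin(t)

    at a fixed angular step.  The default step of MagickPI/8 yields 16
    segments per revolution; for large radii, delta=2/max(radii) drops below
    MagickPI/8 and the step shrinks to roughly 1/max(radii), so each segment
    spans about one pixel of arc along the larger radius.
  */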
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
return(MagickTrue);
delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
step=MagickPI/8.0;
if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
angle.x=DegreesToRadians(arc.x);
y=arc.y;
while (y < arc.x)
y+=360.0;
angle.y=DegreesToRadians(y);
coordinates=ceil((angle.y-angle.x)/step+1.0);
if (coordinates > (108.0*BezierQuantum))
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
return(MagickFalse);
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
for (p=primitive_info; angle.x < angle.y; angle.x+=step)
{
point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
}
point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
x=fabs(primitive_info[0].point.x-
primitive_info[primitive_info->coordinates-1].point.x);
y=fabs(primitive_info[0].point.y-
primitive_info[primitive_info->coordinates-1].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
/*
  TraceLine() emits a two-point line primitive, degenerating to a point
  primitive when start and end coincide.
*/
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
if (TracePoint(primitive_info,start) == MagickFalse)
return(MagickFalse);
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
{
primitive_info->primitive=PointPrimitive;
primitive_info->coordinates=1;
return(MagickTrue);
}
if (TracePoint(primitive_info+1,end) == MagickFalse)
return(MagickFalse);
(primitive_info+1)->primitive=primitive_info->primitive;
primitive_info->coordinates=2;
primitive_info->closed_subpath=MagickFalse;
return(MagickTrue);
}
/*
  TracePath() parses an SVG-style path string into point primitives and
  returns the number of coordinates traced, or -1 on error.
*/
static ssize_t TracePath(Image *image,MVGInfo *mvg_info,const char *path)
{
char
*next_token,
token[MaxTextExtent];
const char
*p;
double
x,
y;
int
attribute,
last_attribute;
MagickStatusType
status;
PointInfo
end = {0.0, 0.0},
points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
point = {0.0, 0.0},
start = {0.0, 0.0};
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register PrimitiveInfo
*q;
register ssize_t
i;
size_t
number_coordinates,
z_count;
ssize_t
subpath_offset;
subpath_offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
status=MagickTrue;
attribute=0;
number_coordinates=0;
z_count=0;
primitive_type=primitive_info->primitive;
q=primitive_info;
for (p=path; *p != '\0'; )
{
if (status == MagickFalse)
break;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == '\0')
break;
last_attribute=attribute;
attribute=(int) (*p++);
switch (attribute)
{
case 'a':
case 'A':
{
double
angle = 0.0;
MagickBooleanType
large_arc = MagickFalse,
sweep = MagickFalse;
PointInfo
arc = {0.0, 0.0};
/*
Elliptical arc.
*/
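        /*
          Token order follows the SVG arc grammar:
            rx ry x-axis-rotation large-arc-flag sweep-flag x y
          Relative ('a') coordinates are offset from the current point.
        */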
do
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
arc.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
arc.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
status&=TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'c':
case 'C':
{
/*
Cubic Bézier curve.
*/
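        /*
          Each 'C' segment consumes three coordinate pairs: two control
          points and the endpoint.  The current point supplies the first of
          the four Bezier points, so points[0]=point and points[1..3] are
          read from the path string.
        */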
do
{
points[0]=point;
for (i=1; i < 4; i++)
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'H':
case 'h':
{
do
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'l':
case 'L':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'M':
case 'm':
{
/*
Move to.
*/
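        /*
          A moveto first flushes the coordinate count of any open subpath,
          then starts a new one; the first pair becomes the subpath start
          (used by a later closepath) and, per the SVG grammar, any further
          pairs behave as implicit lineto commands.
        */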
if (mvg_info->offset != subpath_offset)
{
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
}
i=0;
do
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
if (i == 0)
start=point;
i++;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'q':
case 'Q':
{
/*
Quadratic Bézier curve.
*/
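        /*
          Each 'Q' segment consumes two coordinate pairs: one control point
          and the endpoint; the current point supplies points[0].
        */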
do
{
points[0]=point;
for (i=1; i < 3; i++)
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 's':
case 'S':
{
/*
Cubic Bézier curve.
*/
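        /*
          Smooth ('S') variant: the first control point is the reflection of
          the previous segment's second control point about the current
          point, i.e. points[1]=2*points[3]-points[2].  If the previous
          command was not C/c/S/s, both leading points collapse to the
          current point.
        */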
do
{
points[0]=points[3];
points[1].x=2.0*points[3].x-points[2].x;
points[1].y=2.0*points[3].y-points[2].y;
for (i=2; i < 4; i++)
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
points[i]=end;
}
if (strchr("CcSs",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 't':
case 'T':
{
/*
Quadratic Bézier curve.
*/
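        /*
          Smooth ('T') variant: the control point is the reflection of the
          previous control point about the current point; without a
          preceding Q/q/T/t it collapses to the current point.
        */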
do
{
points[0]=points[2];
points[1].x=2.0*points[2].x-points[1].x;
points[1].y=2.0*points[2].y-points[1].y;
for (i=2; i < 3; i++)
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
points[i]=end;
}
if (status == MagickFalse)
break;
if (strchr("QqTt",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'v':
case 'V':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'z':
case 'Z':
{
/*
Close path.
*/
point=start;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
primitive_info->closed_subpath=MagickTrue;
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
z_count++;
break;
}
default:
{
ThrowPointExpectedException(image,token);
break;
}
}
}
if (status == MagickFalse)
return(-1);
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
q--;
q->primitive=primitive_type;
if (z_count > 1)
q->method=FillToBorderMethod;
}
q=primitive_info;
return((ssize_t) number_coordinates);
}
/*
  TraceRectangle() traces an axis-aligned rectangle as a closed five-point
  subpath.
*/
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
PointInfo
point;
register PrimitiveInfo
*p;
register ssize_t
i;
p=primitive_info;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=start.x;
point.y=end.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,end) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=end.x;
point.y=start.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
/*
  TraceRoundRectangle() traces a rectangle with elliptical corners as four
  90-degree arcs joined into one closed subpath.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
PointInfo
degrees,
point,
segment;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
ssize_t
offset;
offset=mvg_info->offset;
segment.x=fabs(end.x-start.x);
segment.y=fabs(end.y-start.y);
if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
{
(*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
return(MagickTrue);
}
if (arc.x > (0.5*segment.x))
arc.x=0.5*segment.x;
if (arc.y > (0.5*segment.y))
arc.y=0.5*segment.y;
point.x=start.x+segment.x-arc.x;
point.y=start.y+arc.y;
degrees.x=270.0;
degrees.y=360.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+segment.x-arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=0.0;
degrees.y=90.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=90.0;
degrees.y=180.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+arc.y;
degrees.x=180.0;
degrees.y=270.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
/*
  TraceSquareLinecap() extends the first and last vertices of an open path
  outward by offset along their segment directions to square off the caps.
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
double
distance;
register double
dx,
dy;
register ssize_t
i;
ssize_t
j;
dx=0.0;
dy=0.0;
for (i=1; i < (ssize_t) number_vertices; i++)
{
dx=primitive_info[0].point.x-primitive_info[i].point.x;
dy=primitive_info[0].point.y-primitive_info[i].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
if (i == (ssize_t) number_vertices)
i=(ssize_t) number_vertices-1L;
distance=hypot((double) dx,(double) dy);
primitive_info[0].point.x=(double) (primitive_info[i].point.x+
dx*(distance+offset)/distance);
primitive_info[0].point.y=(double) (primitive_info[i].point.y+
dy*(distance+offset)/distance);
for (j=(ssize_t) number_vertices-2; j >= 0; j--)
{
dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
distance=hypot((double) dx,(double) dy);
primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
dx*(distance+offset)/distance);
primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
dy*(distance+offset)/distance);
return(MagickTrue);
}
/*
  TraceStrokePolygon() computes the outline polygon of a stroked path from
  the stroke width, line join, and line cap of draw_info.
*/
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info)
{
#define MaxStrokePad (6*BezierQuantum+360)
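/*
  CheckPathExtent() grows the stroke_p and stroke_q point buffers so that at
  least pad_p and pad_q more entries fit, always keeping MaxStrokePad of
  slack; on overflow or allocation failure it releases both buffers and the
  polygon primitive and returns NULL from the enclosing function.
*/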
#define CheckPathExtent(pad_p,pad_q) \
{ \
if ((pad_p) > MaxBezierCoordinates) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
{ \
if (~extent_p < (pad_p)) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
{ \
extent_p+=(pad_p); \
stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
MaxStrokePad,sizeof(*stroke_p)); \
} \
} \
if ((pad_q) > MaxBezierCoordinates) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
{ \
if (~extent_q < (pad_q)) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
{ \
extent_q+=(pad_q); \
stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
MaxStrokePad,sizeof(*stroke_q)); \
} \
} \
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
{ \
if (stroke_p != (PointInfo *) NULL) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
if (stroke_q != (PointInfo *) NULL) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
polygon_primitive=(PrimitiveInfo *) \
RelinquishMagickMemory(polygon_primitive); \
return((PrimitiveInfo *) NULL); \
} \
}
typedef struct _StrokeSegment
{
double
p,
q;
} StrokeSegment;
double
delta_theta,
dot_product,
mid,
miterlimit;
MagickBooleanType
closed_path;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*stroke_p,
*stroke_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
register ssize_t
i;
size_t
arc_segments,
extent_p,
extent_q,
number_vertices;
ssize_t
j,
n,
p,
q;
StrokeSegment
dx = {0.0, 0.0},
dy = {0.0, 0.0},
inverse_slope = {0.0, 0.0},
slope = {0.0, 0.0},
theta = {0.0, 0.0};
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if (polygon_primitive == (PrimitiveInfo *) NULL)
return((PrimitiveInfo *) NULL);
(void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
sizeof(*polygon_primitive));
offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x;
offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y;
closed_path=(fabs(offset.x) < MagickEpsilon) &&
(fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse;
if (((draw_info->linejoin == RoundJoin) ||
(draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
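  /*
    Scan forward for the first vertex measurably distinct from vertex 0;
    near-vertical and near-horizontal segments are later given sentinel
    slopes of +/-1.0/MagickEpsilon so the offset math below stays finite.
  */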
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
{
if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
{
/*
Zero length subpath.
*/
stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
sizeof(*stroke_polygon));
stroke_polygon[0]=polygon_primitive[0];
stroke_polygon[0].coordinates=0;
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
n=(ssize_t) number_vertices-1L;
}
extent_p=2*number_vertices;
extent_q=2*number_vertices;
stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
sizeof(*stroke_p));
stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
sizeof(*stroke_q));
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
{
if (stroke_p != (PointInfo *) NULL)
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
if (stroke_q != (PointInfo *) NULL)
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *)
RelinquishMagickMemory(polygon_primitive);
return((PrimitiveInfo *) NULL);
}
slope.p=0.0;
inverse_slope.p=0.0;
if (fabs(dx.p) < MagickEpsilon)
{
if (dx.p >= 0.0)
slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.p) < MagickEpsilon)
{
if (dy.p >= 0.0)
inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.p=dy.p/dx.p;
inverse_slope.p=(-1.0/slope.p);
}
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
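  /*
    miterlimit is pre-squared and scaled by the half-width so each join test
    can compare the squared distance between the two candidate miter points
    against it without a sqrt().
  */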
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
(void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
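  /*
    For each interior vertex the loop maintains two offset polylines,
    stroke_p and stroke_q, one on each side of the path.  box_p[4]/box_q[4]
    hold the miter point where adjacent offset edges intersect; when the two
    edges are not parallel it is recovered from the two point-slope line
    equations, e.g. for side p:

      x = (m1*x0 - y0 - m2*x3 + y3)/(m1 - m2),   y = m1*(x - x0) + y0

    with m1=slope.p and m2=slope.q.  Each linejoin case then emits either
    the miter point or a bevel/round fan, depending on the miterlimit test.
  */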
p=0;
q=0;
stroke_q[p++]=box_q[0];
stroke_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=0.0;
inverse_slope.q=0.0;
if (fabs(dx.q) < MagickEpsilon)
{
if (dx.q >= 0.0)
slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.q) < MagickEpsilon)
{
if (dy.q >= 0.0)
inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.q=dy.q/dx.q;
inverse_slope.q=(-1.0/slope.q);
}
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
CheckPathExtent(MaxStrokePad,MaxStrokePad);
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
stroke_q[q].x=box_q[1].x;
stroke_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
stroke_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
stroke_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
stroke_p[p++]=box_p[2];
break;
}
default:
break;
}
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
stroke_p[p++]=box_p[1];
stroke_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon != (PrimitiveInfo *) NULL)
{
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
}
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
|
convolution_3x3_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Winograd F(6x6,3x3) 3x3 stride-1 convolution: bf16 (bf16s) storage with
// fp32 arithmetic, 4-element packed channel layout, NEON intrinsics/assembly.
static void conv3x3s1_winograd64_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
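    // Winograd F(6x6,3x3) consumes 8x8 input tiles that overlap by 2 pixels,
    // so the padded input must be 6n+2 in each dimension to cover the 6x6
    // output tiles exactly.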
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
bottom_blob_tm.create(tiles, 64, inch, 4u * elempack, elempack, opt.workspace_allocator);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
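        // The transform is separable: the eight expressions above are first
        // applied to each row of the 8x8 tile (into tmp), then to each
        // column of tmp, computing B^T * d * B for the tile.  A scalar
        // sketch of the first pair for one row (hypothetical names):
        //
        //   out[0] = d[0] - d[6] + (d[4] - d[2]) * 5.25f;
        //   out[7] = d[7] - d[1] + (d[3] - d[5]) * 5.25f;
        //
        // with the remaining rows following the tmp12/tmp34/tmp56 pairs.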
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8][4];
// tile
for (int i = 0; i < h_tm / 8; i++)
{
for (int j = 0; j < w_tm / 8; j++)
{
const unsigned short* r0 = img0.row<const unsigned short>(i * 6) + (j * 6) * 4;
for (int m = 0; m < 8; m++)
{
float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));
float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16));
float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20));
float32x4_t _r06 = vcvt_f32_bf16(vld1_u16(r0 + 24));
float32x4_t _r07 = vcvt_f32_bf16(vld1_u16(r0 + 28));
float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[7][m], _tmp7m);
// tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
// tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);
// float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
// float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[2][m], _tmp2m);
// tmp[1][m] = tmp12a + tmp12b;
// tmp[2][m] = tmp12a - tmp12b;
float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);
// float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
// float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);
vst1q_f32(tmp[3][m], _tmp3m);
vst1q_f32(tmp[4][m], _tmp4m);
// tmp[3][m] = tmp34a + tmp34b;
// tmp[4][m] = tmp34a - tmp34b;
float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);
// float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
// float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);
vst1q_f32(tmp[5][m], _tmp5m);
vst1q_f32(tmp[6][m], _tmp6m);
// tmp[5][m] = tmp56a + tmp56b;
// tmp[6][m] = tmp56a - tmp56b;
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 8;
float* r0_tm_3 = r0_tm_0 + tiles * 12;
float* r0_tm_4 = r0_tm_0 + tiles * 16;
float* r0_tm_5 = r0_tm_0 + tiles * 20;
float* r0_tm_6 = r0_tm_0 + tiles * 24;
float* r0_tm_7 = r0_tm_0 + tiles * 28;
for (int m = 0; m < 8; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);
float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);
// r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
// r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);
// float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
// float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25);
float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);
// r0_tm[1] = tmp12a + tmp12b;
// r0_tm[2] = tmp12a - tmp12b;
float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);
// float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
// float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);
// r0_tm[3] = tmp34a + tmp34b;
// r0_tm[4] = tmp34a - tmp34b;
float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);
// float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
// float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);
// r0_tm[5] = tmp56a + tmp56b;
// r0_tm[6] = tmp56a - tmp56b;
vst1q_f32(r0_tm_0, _r0tm0);
vst1q_f32(r0_tm_1, _r0tm1);
vst1q_f32(r0_tm_2, _r0tm2);
vst1q_f32(r0_tm_3, _r0tm3);
vst1q_f32(r0_tm_4, _r0tm4);
vst1q_f32(r0_tm_5, _r0tm5);
vst1q_f32(r0_tm_6, _r0tm6);
vst1q_f32(r0_tm_7, _r0tm7);
r0_tm_0 += tiles * 32;
r0_tm_1 += tiles * 32;
r0_tm_2 += tiles * 32;
r0_tm_3 += tiles * 32;
r0_tm_4 += tiles * 32;
r0_tm_5 += tiles * 32;
r0_tm_6 += tiles * 32;
r0_tm_7 += tiles * 32;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
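        // Re-pack the transformed tiles into contiguous blocks of 12/8/4/2/1
        // (8/4/2/1 on 32-bit) so the GEMM micro-kernels below stream their
        // inputs linearly; the row index i/12 + (i%12)/8 + (i%12%8)/4 + ...
        // maps tile i to the start of its block.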
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
#if __aarch64__
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator);
#else
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
float* tm2p = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v8.4s}, [%1], #16 \n"
"sub %0, %0, #128 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v11.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
r0 += bottom_blob_tm.cstep * 4;
}
}
#endif
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8);
#else
float* tm2p = tm2.row(i / 8);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
"sub %0, %0, #64 \n"
"st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0!, {d0-d7} \n"
"pld [%0, #512] \n"
"vldm %0, {d16-d23} \n"
// transpose 8x4
"vtrn.32 q0, q1 \n"
"vtrn.32 q2, q3 \n"
"vtrn.32 q8, q9 \n"
"vtrn.32 q10, q11 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vswp d17, d20 \n"
"vswp d19, d22 \n"
"vswp q1, q8 \n"
"vswp q3, q10 \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"sub %0, %0, #64 \n"
"vst1.f32 {d4-d7}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
#endif
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d0-d7} \n"
"vstm %1!, {d0-d7} \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i + 1 < tiles; i += 2)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.4s, v1.4s}, [%0] \n"
"st1 {v0.4s, v1.4s}, [%1], #32 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1");
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d0-d3}, [%0 :128] \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i < tiles; i++)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
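    // Process output channels in pairs on aarch64: each loaded input vector
    // feeds the accumulators of both channels, roughly halving input
    // bandwidth per output.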
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
const Mat kernel01_tm = kernel_tm.channel(pp);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"fmla v20.4s, v5.4s, v0.s[0] \n"
"fmla v21.4s, v5.4s, v0.s[1] \n"
"fmla v22.4s, v5.4s, v0.s[2] \n"
"fmla v23.4s, v5.4s, v0.s[3] \n"
"fmla v24.4s, v5.4s, v1.s[0] \n"
"fmla v25.4s, v5.4s, v1.s[1] \n"
"fmla v26.4s, v5.4s, v1.s[2] \n"
"fmla v27.4s, v5.4s, v1.s[3] \n"
"fmla v28.4s, v5.4s, v2.s[0] \n"
"fmla v29.4s, v5.4s, v2.s[1] \n"
"fmla v30.4s, v5.4s, v2.s[2] \n"
"fmla v31.4s, v5.4s, v2.s[3] \n"
"fmla v8.4s, v6.4s, v3.s[0] \n"
"fmla v9.4s, v6.4s, v3.s[1] \n"
"fmla v10.4s, v6.4s, v3.s[2] \n"
"fmla v11.4s, v6.4s, v3.s[3] \n"
"fmla v20.4s, v7.4s, v3.s[0] \n"
"fmla v21.4s, v7.4s, v3.s[1] \n"
"fmla v22.4s, v7.4s, v3.s[2] \n"
"fmla v23.4s, v7.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v12.4s, v6.4s, v0.s[0] \n"
"fmla v13.4s, v6.4s, v0.s[1] \n"
"fmla v14.4s, v6.4s, v0.s[2] \n"
"fmla v15.4s, v6.4s, v0.s[3] \n"
"fmla v16.4s, v6.4s, v1.s[0] \n"
"fmla v17.4s, v6.4s, v1.s[1] \n"
"fmla v18.4s, v6.4s, v1.s[2] \n"
"fmla v19.4s, v6.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v0.s[0] \n"
"fmla v25.4s, v7.4s, v0.s[1] \n"
"fmla v26.4s, v7.4s, v0.s[2] \n"
"fmla v27.4s, v7.4s, v0.s[3] \n"
"fmla v28.4s, v7.4s, v1.s[0] \n"
"fmla v29.4s, v7.4s, v1.s[1] \n"
"fmla v30.4s, v7.4s, v1.s[2] \n"
"fmla v31.4s, v7.4s, v1.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01
"fmla v8.4s, v4.4s, v2.s[0] \n"
"fmla v9.4s, v4.4s, v2.s[1] \n"
"fmla v10.4s, v4.4s, v2.s[2] \n"
"fmla v11.4s, v4.4s, v2.s[3] \n"
"fmla v12.4s, v4.4s, v3.s[0] \n"
"fmla v13.4s, v4.4s, v3.s[1] \n"
"fmla v14.4s, v4.4s, v3.s[2] \n"
"fmla v15.4s, v4.4s, v3.s[3] \n"
"fmla v20.4s, v5.4s, v2.s[0] \n"
"fmla v21.4s, v5.4s, v2.s[1] \n"
"fmla v22.4s, v5.4s, v2.s[2] \n"
"fmla v23.4s, v5.4s, v2.s[3] \n"
"fmla v24.4s, v5.4s, v3.s[0] \n"
"fmla v25.4s, v5.4s, v3.s[1] \n"
"fmla v26.4s, v5.4s, v3.s[2] \n"
"fmla v27.4s, v5.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v4.4s, v0.s[1] \n"
"fmla v18.4s, v4.4s, v0.s[2] \n"
"fmla v19.4s, v4.4s, v0.s[3] \n"
"fmla v28.4s, v5.4s, v0.s[0] \n"
"fmla v29.4s, v5.4s, v0.s[1] \n"
"fmla v30.4s, v5.4s, v0.s[2] \n"
"fmla v31.4s, v5.4s, v0.s[3] \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v6.4s, v1.s[1] \n"
"fmla v10.4s, v6.4s, v1.s[2] \n"
"fmla v11.4s, v6.4s, v1.s[3] \n"
"fmla v12.4s, v6.4s, v2.s[0] \n"
"fmla v13.4s, v6.4s, v2.s[1] \n"
"fmla v14.4s, v6.4s, v2.s[2] \n"
"fmla v15.4s, v6.4s, v2.s[3] \n"
"fmla v16.4s, v6.4s, v3.s[0] \n"
"fmla v17.4s, v6.4s, v3.s[1] \n"
"fmla v18.4s, v6.4s, v3.s[2] \n"
"fmla v19.4s, v6.4s, v3.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.4s, v7.4s, v1.s[0] \n"
"fmla v21.4s, v7.4s, v1.s[1] \n"
"fmla v22.4s, v7.4s, v1.s[2] \n"
"fmla v23.4s, v7.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v2.s[0] \n"
"fmla v25.4s, v7.4s, v2.s[1] \n"
"fmla v26.4s, v7.4s, v2.s[2] \n"
"fmla v27.4s, v7.4s, v2.s[3] \n"
"fmla v28.4s, v7.4s, v3.s[0] \n"
"fmla v29.4s, v7.4s, v3.s[1] \n"
"fmla v30.4s, v7.4s, v3.s[2] \n"
"fmla v31.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v24.4s, v9.4s, v0.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v9.4s, v2.s[0] \n"
"fmla v27.4s, v9.4s, v3.s[0] \n"
"fmla v28.4s, v9.4s, v4.s[0] \n"
"fmla v29.4s, v9.4s, v5.s[0] \n"
"fmla v30.4s, v9.4s, v6.s[0] \n"
"fmla v31.4s, v9.4s, v7.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v10.4s, v4.s[1] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v6.s[1] \n"
"fmla v23.4s, v10.4s, v7.s[1] \n"
"fmla v24.4s, v11.4s, v0.s[1] \n"
"fmla v25.4s, v11.4s, v1.s[1] \n"
"fmla v26.4s, v11.4s, v2.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v11.4s, v4.s[1] \n"
"fmla v29.4s, v11.4s, v5.s[1] \n"
"fmla v30.4s, v11.4s, v6.s[1] \n"
"fmla v31.4s, v11.4s, v7.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v12.4s, v5.s[2] \n"
"fmla v22.4s, v12.4s, v6.s[2] \n"
"fmla v23.4s, v12.4s, v7.s[2] \n"
"fmla v24.4s, v13.4s, v0.s[2] \n"
"fmla v25.4s, v13.4s, v1.s[2] \n"
"fmla v26.4s, v13.4s, v2.s[2] \n"
"fmla v27.4s, v13.4s, v3.s[2] \n"
"fmla v28.4s, v13.4s, v4.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v13.4s, v6.s[2] \n"
"fmla v31.4s, v13.4s, v7.s[2] \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v14.4s, v4.s[3] \n"
"fmla v21.4s, v14.4s, v5.s[3] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v14.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4s, v15.4s, v0.s[3] \n"
"fmla v25.4s, v15.4s, v1.s[3] \n"
"fmla v26.4s, v15.4s, v2.s[3] \n"
"fmla v27.4s, v15.4s, v3.s[3] \n"
"fmla v28.4s, v15.4s, v4.s[3] \n"
"fmla v29.4s, v15.4s, v5.s[3] \n"
"fmla v30.4s, v15.4s, v6.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v9.4s, v0.s[0] \n"
"fmla v21.4s, v9.4s, v1.s[0] \n"
"fmla v22.4s, v9.4s, v2.s[0] \n"
"fmla v23.4s, v9.4s, v3.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v11.4s, v0.s[1] \n"
"fmla v21.4s, v11.4s, v1.s[1] \n"
"fmla v22.4s, v11.4s, v2.s[1] \n"
"fmla v23.4s, v11.4s, v3.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v13.4s, v0.s[2] \n"
"fmla v21.4s, v13.4s, v1.s[2] \n"
"fmla v22.4s, v13.4s, v2.s[2] \n"
"fmla v23.4s, v13.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v15.4s, v0.s[3] \n"
"fmla v21.4s, v15.4s, v1.s[3] \n"
"fmla v22.4s, v15.4s, v2.s[3] \n"
"fmla v23.4s, v15.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v11.4s, v0.s[1] \n"
"fmla v19.4s, v11.4s, v1.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v13.4s, v0.s[2] \n"
"fmla v19.4s, v13.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v15.4s, v0.s[3] \n"
"fmla v19.4s, v15.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3], #16 \n" // r0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v11.4s, v0.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v13.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
}
}
#endif // __ARM_NEON && __aarch64__
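// remaining (unpaired) output channels are computed one at a time; on aarch64
// the kernel was repacked in channel pairs, so p / 2 + p % 2 points past the
// pairs at the tail entry of that pairwise layout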
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
#if __aarch64__
const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2);
#else
const Mat kernel0_tm = kernel_tm.channel(p);
#endif
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"fmla v8.4s, v5.4s, v3.s[0] \n"
"fmla v9.4s, v5.4s, v3.s[1] \n"
"fmla v10.4s, v5.4s, v3.s[2] \n"
"fmla v11.4s, v5.4s, v3.s[3] \n"
"fmla v12.4s, v5.4s, v20.s[0] \n"
"fmla v13.4s, v5.4s, v20.s[1] \n"
"fmla v14.4s, v5.4s, v20.s[2] \n"
"fmla v15.4s, v5.4s, v20.s[3] \n"
"fmla v16.4s, v5.4s, v21.s[0] \n"
"fmla v17.4s, v5.4s, v21.s[1] \n"
"fmla v18.4s, v5.4s, v21.s[2] \n"
"fmla v19.4s, v5.4s, v21.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"fmla v8.4s, v6.4s, v22.s[0] \n"
"fmla v9.4s, v6.4s, v22.s[1] \n"
"fmla v10.4s, v6.4s, v22.s[2] \n"
"fmla v11.4s, v6.4s, v22.s[3] \n"
"fmla v12.4s, v6.4s, v23.s[0] \n"
"fmla v13.4s, v6.4s, v23.s[1] \n"
"fmla v14.4s, v6.4s, v23.s[2] \n"
"fmla v15.4s, v6.4s, v23.s[3] \n"
"fmla v16.4s, v6.4s, v24.s[0] \n"
"fmla v17.4s, v6.4s, v24.s[1] \n"
"fmla v18.4s, v6.4s, v24.s[2] \n"
"fmla v19.4s, v6.4s, v24.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v7.4s, v25.s[0] \n"
"fmla v9.4s, v7.4s, v25.s[1] \n"
"fmla v10.4s, v7.4s, v25.s[2] \n"
"fmla v11.4s, v7.4s, v25.s[3] \n"
"fmla v12.4s, v7.4s, v26.s[0] \n"
"fmla v13.4s, v7.4s, v26.s[1] \n"
"fmla v14.4s, v7.4s, v26.s[2] \n"
"fmla v15.4s, v7.4s, v26.s[3] \n"
"fmla v16.4s, v7.4s, v27.s[0] \n"
"fmla v17.4s, v7.4s, v27.s[1] \n"
"fmla v18.4s, v7.4s, v27.s[2] \n"
"fmla v19.4s, v7.4s, v27.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
#else
const float* r0 = bb2.row(i / 8);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v20.4s, v9.4s, v4.s[1] \n"
"fmla v21.4s, v9.4s, v5.s[1] \n"
"fmla v22.4s, v9.4s, v6.s[1] \n"
"fmla v23.4s, v9.4s, v7.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"fmla v20.4s, v10.4s, v4.s[2] \n"
"fmla v21.4s, v10.4s, v5.s[2] \n"
"fmla v22.4s, v10.4s, v6.s[2] \n"
"fmla v23.4s, v10.4s, v7.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"fmla v20.4s, v11.4s, v4.s[3] \n"
"fmla v21.4s, v11.4s, v5.s[3] \n"
"fmla v22.4s, v11.4s, v6.s[3] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"veor q12, q12 \n"
"veor q13, q13 \n"
"veor q14, q14 \n"
"veor q15, q15 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d4[0] \n"
"vmla.f32 q9, q7, d4[1] \n"
"vmla.f32 q10, q7, d5[0] \n"
"vmla.f32 q11, q7, d5[1] \n"
"vmla.f32 q12, q7, d6[0] \n"
"vmla.f32 q13, q7, d6[1] \n"
"vmla.f32 q14, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
"vstm %1!, {d24-d31} \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q10, q4, d4[0] \n"
"vmla.f32 q11, q4, d6[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d4[1] \n"
"vmla.f32 q11, q5, d6[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d7[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"vmla.f32 q10, q7, d5[1] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif
}
for (; i + 1 < tiles; i += 2)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17");
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9");
#endif
}
for (; i < tiles; i++)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n" // r0
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16");
#else
asm volatile(
"veor q8, q8 \n"
"0: \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8");
#endif
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[6][8] = {
//     {1.0f,  1.0f,  1.0f,  1.0f,  1.0f, 32.0f,  32.0f, 0.0f},
//     {0.0f,  1.0f, -1.0f,  2.0f, -2.0f, 16.0f, -16.0f, 0.0f},
//     {0.0f,  1.0f,  1.0f,  4.0f,  4.0f,  8.0f,   8.0f, 0.0f},
//     {0.0f,  1.0f, -1.0f,  8.0f, -8.0f,  4.0f,  -4.0f, 0.0f},
//     {0.0f,  1.0f,  1.0f, 16.0f, 16.0f,  2.0f,   2.0f, 0.0f},
//     {0.0f,  1.0f, -1.0f, 32.0f, -32.0f, 1.0f,  -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
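// the 8x8 tile v is reduced to a 6x6 output block as out = otm * v * otm^T,
// applied as two 1-D passes: first along one axis into tmp[6][8][4] below,
// then along the other axis with the bias added on the way out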
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
float tmp[6][8][4];
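// tmp: intermediate after the first pass, 6 transformed rows x 8 untouched
// columns x 4 fp32 lanes (pack4)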
// tile
for (int i = 0; i < outh / 6; i++)
{
for (int j = 0; j < outw / 6; j++)
{
// top_blob_tm.create(tiles, 64, outch, elemsize, elempack);
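// the 64 transform coefficients of each tile live in separate planes, so
// neighbouring coefficients of one tile are tiles * 4 floats apart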
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 8;
const float* output0_tm_3 = output0_tm_0 + tiles * 12;
const float* output0_tm_4 = output0_tm_0 + tiles * 16;
const float* output0_tm_5 = output0_tm_0 + tiles * 20;
const float* output0_tm_6 = output0_tm_0 + tiles * 24;
const float* output0_tm_7 = output0_tm_0 + tiles * 28;
unsigned short* output0 = out0.row<unsigned short>(i * 6) + (j * 6) * 4;
// TODO neon optimize
for (int m = 0; m < 8; m++)
{
float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);
float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);
// float tmp024a = output0_tm[1] + output0_tm[2];
// float tmp135a = output0_tm[1] - output0_tm[2];
float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);
// float tmp024b = output0_tm[3] + output0_tm[4];
// float tmp135b = output0_tm[3] - output0_tm[4];
float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);
// float tmp024c = output0_tm[5] + output0_tm[6];
// float tmp135c = output0_tm[5] - output0_tm[6];
float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[2][m], _tmp2m);
vst1q_f32(tmp[4][m], _tmp4m);
// tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
// tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
// tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c * 2;
float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[3][m], _tmp3m);
vst1q_f32(tmp[5][m], _tmp5m);
// tmp[1][m] = tmp135a + tmp135b * 2 + tmp135c * 16;
// tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
// tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += tiles * 32;
output0_tm_1 += tiles * 32;
output0_tm_2 += tiles * 32;
output0_tm_3 += tiles * 32;
output0_tm_4 += tiles * 32;
output0_tm_5 += tiles * 32;
output0_tm_6 += tiles * 32;
output0_tm_7 += tiles * 32;
}
for (int m = 0; m < 6; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);
float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);
// float tmp024a = tmp0[1] + tmp0[2];
// float tmp135a = tmp0[1] - tmp0[2];
float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);
// float tmp024b = tmp0[3] + tmp0[4];
// float tmp135b = tmp0[3] - tmp0[4];
float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);
// float tmp024c = tmp0[5] + tmp0[6];
// float tmp135c = tmp0[5] - tmp0[6];
float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
vst1_u16(output0, vcvt_bf16_f32(_out00));
vst1_u16(output0 + 8, vcvt_bf16_f32(_out02));
vst1_u16(output0 + 16, vcvt_bf16_f32(_out04));
// output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
// output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
// output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c * 2;
float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));
vst1_u16(output0 + 4, vcvt_bf16_f32(_out01));
vst1_u16(output0 + 12, vcvt_bf16_f32(_out03));
vst1_u16(output0 + 20, vcvt_bf16_f32(_out05));
// output0[1] = bias0 + tmp135a + tmp135b * 2 + tmp135c * 16;
// output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
// output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw * 4;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator);
const int tailstep = (w - 2 * outw + w) * 4;
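// stride-2: each output row consumes 2 * outw input pixels, so the row tail
// skips the (w - 2 * outw) leftover pixels plus one whole input row,
// times 4 floats per pack4 pixel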
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob_fp32.channel(get_omp_thread_num());
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
int q = 0;
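// accumulate all but the last input channel into the per-thread fp32 buffer
// (bias pre-filled above); the q < inch - 1 bound suggests the final channel
// is handled separately after this loop so the sum is converted to bf16 once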
for (; q < inch - 1; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p).row<const unsigned short>(q);
#if __aarch64__
// 9 taps x 4 out-lanes x 4 in-lanes = 16 * 9 bf16 weights for this (p, q) step
uint16x8_t _k00_01 = vld1q_u16(kptr);
uint16x8_t _k00_23 = vld1q_u16(kptr + 8);
uint16x8_t _k01_01 = vld1q_u16(kptr + 16);
uint16x8_t _k01_23 = vld1q_u16(kptr + 24);
uint16x8_t _k02_01 = vld1q_u16(kptr + 32);
uint16x8_t _k02_23 = vld1q_u16(kptr + 40);
uint16x8_t _k10_01 = vld1q_u16(kptr + 48);
uint16x8_t _k10_23 = vld1q_u16(kptr + 56);
uint16x8_t _k11_01 = vld1q_u16(kptr + 64);
uint16x8_t _k11_23 = vld1q_u16(kptr + 72);
uint16x8_t _k12_01 = vld1q_u16(kptr + 80);
uint16x8_t _k12_23 = vld1q_u16(kptr + 88);
uint16x8_t _k20_01 = vld1q_u16(kptr + 96);
uint16x8_t _k20_23 = vld1q_u16(kptr + 104);
uint16x8_t _k21_01 = vld1q_u16(kptr + 112);
uint16x8_t _k21_23 = vld1q_u16(kptr + 120);
uint16x8_t _k22_01 = vld1q_u16(kptr + 128);
uint16x8_t _k22_23 = vld1q_u16(kptr + 136);
#endif // __aarch64__
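// on aarch64 all 18 kernel vectors above stay pinned in NEON registers via the
// "w" constraints of the asm blocks below; the armv7 path instead streams the
// weights through kptr and rewinds it at the end of each tile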
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0] \n" // sum0 sum1 sum2 sum3
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%1], #64 \n" // r00 r01 r02 r03
"shll v0.4s, v4.4h, #16 \n"
"shll2 v1.4s, v4.8h, #16 \n"
"shll v2.4s, v5.4h, #16 \n"
"shll2 v3.4s, v5.8h, #16 \n"
"shll v4.4s, v6.4h, #16 \n"
"shll2 v5.4s, v6.8h, #16 \n"
"shll v6.4s, v7.4h, #16 \n"
"shll2 v7.4s, v7.8h, #16 \n"
"shll v8.4s, %8.4h, #16 \n"
"shll2 v9.4s, %8.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[0] \n"
"fmla v11.4s, v8.4s, v2.s[0] \n"
"fmla v12.4s, v8.4s, v4.s[0] \n"
"fmla v13.4s, v8.4s, v6.s[0] \n"
"fmla v10.4s, v9.4s, v0.s[1] \n"
"fmla v11.4s, v9.4s, v2.s[1] \n"
"fmla v12.4s, v9.4s, v4.s[1] \n"
"fmla v13.4s, v9.4s, v6.s[1] \n"
"shll v8.4s, %9.4h, #16 \n"
"shll2 v9.4s, %9.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[2] \n"
"fmla v11.4s, v8.4s, v2.s[2] \n"
"fmla v12.4s, v8.4s, v4.s[2] \n"
"fmla v13.4s, v8.4s, v6.s[2] \n"
"fmla v10.4s, v9.4s, v0.s[3] \n"
"fmla v11.4s, v9.4s, v2.s[3] \n"
"fmla v12.4s, v9.4s, v4.s[3] \n"
"fmla v13.4s, v9.4s, v6.s[3] \n"
"shll v8.4s, %10.4h, #16 \n"
"shll2 v9.4s, %10.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[0] \n"
"fmla v11.4s, v8.4s, v3.s[0] \n"
"fmla v12.4s, v8.4s, v5.s[0] \n"
"fmla v13.4s, v8.4s, v7.s[0] \n"
"fmla v10.4s, v9.4s, v1.s[1] \n"
"fmla v11.4s, v9.4s, v3.s[1] \n"
"fmla v12.4s, v9.4s, v5.s[1] \n"
"fmla v13.4s, v9.4s, v7.s[1] \n"
"shll v8.4s, %11.4h, #16 \n"
"shll2 v9.4s, %11.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[2] \n"
"fmla v11.4s, v8.4s, v3.s[2] \n"
"fmla v12.4s, v8.4s, v5.s[2] \n"
"fmla v13.4s, v8.4s, v7.s[2] \n"
"fmla v10.4s, v9.4s, v1.s[3] \n"
"fmla v11.4s, v9.4s, v3.s[3] \n"
"fmla v12.4s, v9.4s, v5.s[3] \n"
"fmla v13.4s, v9.4s, v7.s[3] \n"
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1] \n" // r08
"shll v0.4s, v0.4h, #16 \n"
"shll v8.4s, %12.4h, #16 \n"
"shll2 v9.4s, %12.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[0] \n"
"fmla v11.4s, v8.4s, v4.s[0] \n"
"fmla v12.4s, v8.4s, v6.s[0] \n"
"fmla v13.4s, v8.4s, v0.s[0] \n"
"fmla v10.4s, v9.4s, v2.s[1] \n"
"fmla v11.4s, v9.4s, v4.s[1] \n"
"fmla v12.4s, v9.4s, v6.s[1] \n"
"fmla v13.4s, v9.4s, v0.s[1] \n"
"shll v8.4s, %13.4h, #16 \n"
"shll2 v9.4s, %13.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[2] \n"
"fmla v11.4s, v8.4s, v4.s[2] \n"
"fmla v12.4s, v8.4s, v6.s[2] \n"
"fmla v13.4s, v8.4s, v0.s[2] \n"
"fmla v10.4s, v9.4s, v2.s[3] \n"
"fmla v11.4s, v9.4s, v4.s[3] \n"
"fmla v12.4s, v9.4s, v6.s[3] \n"
"fmla v13.4s, v9.4s, v0.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r10 r11 r12 r13
"shll v0.4s, v4.4h, #16 \n"
"shll2 v1.4s, v4.8h, #16 \n"
"shll v2.4s, v5.4h, #16 \n"
"shll2 v3.4s, v5.8h, #16 \n"
"shll v4.4s, v6.4h, #16 \n"
"shll2 v5.4s, v6.8h, #16 \n"
"shll v6.4s, v7.4h, #16 \n"
"shll2 v7.4s, v7.8h, #16 \n"
"shll v8.4s, %14.4h, #16 \n"
"shll2 v9.4s, %14.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[0] \n"
"fmla v11.4s, v8.4s, v2.s[0] \n"
"fmla v12.4s, v8.4s, v4.s[0] \n"
"fmla v13.4s, v8.4s, v6.s[0] \n"
"fmla v10.4s, v9.4s, v0.s[1] \n"
"fmla v11.4s, v9.4s, v2.s[1] \n"
"fmla v12.4s, v9.4s, v4.s[1] \n"
"fmla v13.4s, v9.4s, v6.s[1] \n"
"shll v8.4s, %15.4h, #16 \n"
"shll2 v9.4s, %15.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[2] \n"
"fmla v11.4s, v8.4s, v2.s[2] \n"
"fmla v12.4s, v8.4s, v4.s[2] \n"
"fmla v13.4s, v8.4s, v6.s[2] \n"
"fmla v10.4s, v9.4s, v0.s[3] \n"
"fmla v11.4s, v9.4s, v2.s[3] \n"
"fmla v12.4s, v9.4s, v4.s[3] \n"
"fmla v13.4s, v9.4s, v6.s[3] \n"
"shll v8.4s, %16.4h, #16 \n"
"shll2 v9.4s, %16.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[0] \n"
"fmla v11.4s, v8.4s, v3.s[0] \n"
"fmla v12.4s, v8.4s, v5.s[0] \n"
"fmla v13.4s, v8.4s, v7.s[0] \n"
"fmla v10.4s, v9.4s, v1.s[1] \n"
"fmla v11.4s, v9.4s, v3.s[1] \n"
"fmla v12.4s, v9.4s, v5.s[1] \n"
"fmla v13.4s, v9.4s, v7.s[1] \n"
"shll v8.4s, %17.4h, #16 \n"
"shll2 v9.4s, %17.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[2] \n"
"fmla v11.4s, v8.4s, v3.s[2] \n"
"fmla v12.4s, v8.4s, v5.s[2] \n"
"fmla v13.4s, v8.4s, v7.s[2] \n"
"fmla v10.4s, v9.4s, v1.s[3] \n"
"fmla v11.4s, v9.4s, v3.s[3] \n"
"fmla v12.4s, v9.4s, v5.s[3] \n"
"fmla v13.4s, v9.4s, v7.s[3] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2] \n" // r18
"shll v0.4s, v0.4h, #16 \n"
"shll v8.4s, %18.4h, #16 \n"
"shll2 v9.4s, %18.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[0] \n"
"fmla v11.4s, v8.4s, v4.s[0] \n"
"fmla v12.4s, v8.4s, v6.s[0] \n"
"fmla v13.4s, v8.4s, v0.s[0] \n"
"fmla v10.4s, v9.4s, v2.s[1] \n"
"fmla v11.4s, v9.4s, v4.s[1] \n"
"fmla v12.4s, v9.4s, v6.s[1] \n"
"fmla v13.4s, v9.4s, v0.s[1] \n"
"shll v8.4s, %19.4h, #16 \n"
"shll2 v9.4s, %19.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[2] \n"
"fmla v11.4s, v8.4s, v4.s[2] \n"
"fmla v12.4s, v8.4s, v6.s[2] \n"
"fmla v13.4s, v8.4s, v0.s[2] \n"
"fmla v10.4s, v9.4s, v2.s[3] \n"
"fmla v11.4s, v9.4s, v4.s[3] \n"
"fmla v12.4s, v9.4s, v6.s[3] \n"
"fmla v13.4s, v9.4s, v0.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" // r20 r21 r22 r23
"shll v0.4s, v4.4h, #16 \n"
"shll2 v1.4s, v4.8h, #16 \n"
"shll v2.4s, v5.4h, #16 \n"
"shll2 v3.4s, v5.8h, #16 \n"
"shll v4.4s, v6.4h, #16 \n"
"shll2 v5.4s, v6.8h, #16 \n"
"shll v6.4s, v7.4h, #16 \n"
"shll2 v7.4s, v7.8h, #16 \n"
"shll v8.4s, %20.4h, #16 \n"
"shll2 v9.4s, %20.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[0] \n"
"fmla v11.4s, v8.4s, v2.s[0] \n"
"fmla v12.4s, v8.4s, v4.s[0] \n"
"fmla v13.4s, v8.4s, v6.s[0] \n"
"fmla v10.4s, v9.4s, v0.s[1] \n"
"fmla v11.4s, v9.4s, v2.s[1] \n"
"fmla v12.4s, v9.4s, v4.s[1] \n"
"fmla v13.4s, v9.4s, v6.s[1] \n"
"shll v8.4s, %21.4h, #16 \n"
"shll2 v9.4s, %21.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[2] \n"
"fmla v11.4s, v8.4s, v2.s[2] \n"
"fmla v12.4s, v8.4s, v4.s[2] \n"
"fmla v13.4s, v8.4s, v6.s[2] \n"
"fmla v10.4s, v9.4s, v0.s[3] \n"
"fmla v11.4s, v9.4s, v2.s[3] \n"
"fmla v12.4s, v9.4s, v4.s[3] \n"
"fmla v13.4s, v9.4s, v6.s[3] \n"
"shll v8.4s, %22.4h, #16 \n"
"shll2 v9.4s, %22.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[0] \n"
"fmla v11.4s, v8.4s, v3.s[0] \n"
"fmla v12.4s, v8.4s, v5.s[0] \n"
"fmla v13.4s, v8.4s, v7.s[0] \n"
"fmla v10.4s, v9.4s, v1.s[1] \n"
"fmla v11.4s, v9.4s, v3.s[1] \n"
"fmla v12.4s, v9.4s, v5.s[1] \n"
"fmla v13.4s, v9.4s, v7.s[1] \n"
"shll v8.4s, %23.4h, #16 \n"
"shll2 v9.4s, %23.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[2] \n"
"fmla v11.4s, v8.4s, v3.s[2] \n"
"fmla v12.4s, v8.4s, v5.s[2] \n"
"fmla v13.4s, v8.4s, v7.s[2] \n"
"fmla v10.4s, v9.4s, v1.s[3] \n"
"fmla v11.4s, v9.4s, v3.s[3] \n"
"fmla v12.4s, v9.4s, v5.s[3] \n"
"fmla v13.4s, v9.4s, v7.s[3] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3] \n" // r28
"shll v0.4s, v0.4h, #16 \n"
"shll v8.4s, %24.4h, #16 \n"
"shll2 v9.4s, %24.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[0] \n"
"fmla v11.4s, v8.4s, v4.s[0] \n"
"fmla v12.4s, v8.4s, v6.s[0] \n"
"fmla v13.4s, v8.4s, v0.s[0] \n"
"fmla v10.4s, v9.4s, v2.s[1] \n"
"fmla v11.4s, v9.4s, v4.s[1] \n"
"fmla v12.4s, v9.4s, v6.s[1] \n"
"fmla v13.4s, v9.4s, v0.s[1] \n"
"shll v8.4s, %25.4h, #16 \n"
"shll2 v9.4s, %25.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[2] \n"
"fmla v11.4s, v8.4s, v4.s[2] \n"
"fmla v12.4s, v8.4s, v6.s[2] \n"
"fmla v13.4s, v8.4s, v0.s[2] \n"
"fmla v10.4s, v9.4s, v2.s[3] \n"
"fmla v11.4s, v9.4s, v4.s[3] \n"
"fmla v12.4s, v9.4s, v6.s[3] \n"
"fmla v13.4s, v9.4s, v0.s[3] \n"
"st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_01), // %8
"w"(_k00_23), // %9
"w"(_k01_01), // %10
"w"(_k01_23), // %11
"w"(_k02_01), // %12
"w"(_k02_23), // %13
"w"(_k10_01), // %14
"w"(_k10_23), // %15
"w"(_k11_01), // %16
"w"(_k11_23), // %17
"w"(_k12_01), // %18
"w"(_k12_23), // %19
"w"(_k20_01), // %20
"w"(_k20_23), // %21
"w"(_k21_01), // %22
"w"(_k21_23), // %23
"w"(_k22_01), // %24
"w"(_k22_23) // %25
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
#else // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n" // sum0 sum1 sum2 sum3
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n" // r00 r01 r02 r03 r04 r05 r06 r07
"vshll.u16 q0, d8, #16 \n"
"vshll.u16 q1, d9, #16 \n"
"vshll.u16 q2, d10, #16 \n"
"vshll.u16 q3, d11, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%1, #64] \n"
"vld1.f32 {d1}, [%1 :64] \n" // r08
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d8-d15} \n" // r10 r11 r12 r13 r14 r15 r16 r17
"vshll.u16 q0, d8, #16 \n"
"vshll.u16 q1, d9, #16 \n"
"vshll.u16 q2, d10, #16 \n"
"vshll.u16 q3, d11, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%2, #64] \n"
"vld1.f32 {d1}, [%2 :64] \n" // r18
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%3, #256] \n"
"vldm %3!, {d8-d15} \n" // r20 r21 r22 r23 r24 r25 r26 r27
"vshll.u16 q0, d8, #16 \n"
"vshll.u16 q1, d9, #16 \n"
"vshll.u16 q2, d10, #16 \n"
"vshll.u16 q3, d11, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%3, #64] \n"
"vld1.f32 {d1}, [%3 :64] \n" // r28
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
// "pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"sub %4, %4, #256 \n" // kptr -= 8 * 16;
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
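// 2-wide tail: a second accumulator pair is started from scratch with fmul
// and folded in with fadd before the store, shortening the fma dependency
// chains on the loaded sums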
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v12.4s, v13.4s}, [%0] \n" // sum0 sum1
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" // r00 r01 r02 r03
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v6.4s, %8.4h, #16 \n"
"shll2 v7.4s, %8.8h, #16 \n"
"shll v8.4s, %9.4h, #16 \n"
"shll2 v9.4s, %9.8h, #16 \n"
"fmul v10.4s, v6.4s, v0.s[0] \n"
"fmul v11.4s, v6.4s, v2.s[0] \n"
"fmla v12.4s, v7.4s, v0.s[1] \n"
"fmla v13.4s, v7.4s, v2.s[1] \n"
"fmla v10.4s, v8.4s, v0.s[2] \n"
"fmla v11.4s, v8.4s, v2.s[2] \n"
"fmla v12.4s, v9.4s, v0.s[3] \n"
"fmla v13.4s, v9.4s, v2.s[3] \n"
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v4.4h}, [%1] \n" // r04
"shll v4.4s, v4.4h, #16 \n"
"shll v6.4s, %10.4h, #16 \n"
"shll2 v7.4s, %10.8h, #16 \n"
"shll v8.4s, %11.4h, #16 \n"
"shll2 v9.4s, %11.8h, #16 \n"
"fmla v10.4s, v6.4s, v1.s[0] \n"
"fmla v11.4s, v6.4s, v3.s[0] \n"
"fmla v12.4s, v7.4s, v1.s[1] \n"
"fmla v13.4s, v7.4s, v3.s[1] \n"
"fmla v10.4s, v8.4s, v1.s[2] \n"
"fmla v11.4s, v8.4s, v3.s[2] \n"
"fmla v12.4s, v9.4s, v1.s[3] \n"
"fmla v13.4s, v9.4s, v3.s[3] \n"
"shll v6.4s, %12.4h, #16 \n"
"shll2 v7.4s, %12.8h, #16 \n"
"shll v8.4s, %13.4h, #16 \n"
"shll2 v9.4s, %13.8h, #16 \n"
"fmla v10.4s, v6.4s, v2.s[0] \n"
"fmla v11.4s, v6.4s, v4.s[0] \n"
"fmla v12.4s, v7.4s, v2.s[1] \n"
"fmla v13.4s, v7.4s, v4.s[1] \n"
"fmla v10.4s, v8.4s, v2.s[2] \n"
"fmla v11.4s, v8.4s, v4.s[2] \n"
"fmla v12.4s, v9.4s, v2.s[3] \n"
"fmla v13.4s, v9.4s, v4.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r10 r11 r12 r13
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v6.4s, %14.4h, #16 \n"
"shll2 v7.4s, %14.8h, #16 \n"
"shll v8.4s, %15.4h, #16 \n"
"shll2 v9.4s, %15.8h, #16 \n"
"fmla v10.4s, v6.4s, v0.s[0] \n"
"fmla v11.4s, v6.4s, v2.s[0] \n"
"fmla v12.4s, v7.4s, v0.s[1] \n"
"fmla v13.4s, v7.4s, v2.s[1] \n"
"fmla v10.4s, v8.4s, v0.s[2] \n"
"fmla v11.4s, v8.4s, v2.s[2] \n"
"fmla v12.4s, v9.4s, v0.s[3] \n"
"fmla v13.4s, v9.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v4.4h}, [%2] \n" // r14
"shll v4.4s, v4.4h, #16 \n"
"shll v6.4s, %16.4h, #16 \n"
"shll2 v7.4s, %16.8h, #16 \n"
"shll v8.4s, %17.4h, #16 \n"
"shll2 v9.4s, %17.8h, #16 \n"
"fmla v10.4s, v6.4s, v1.s[0] \n"
"fmla v11.4s, v6.4s, v3.s[0] \n"
"fmla v12.4s, v7.4s, v1.s[1] \n"
"fmla v13.4s, v7.4s, v3.s[1] \n"
"fmla v10.4s, v8.4s, v1.s[2] \n"
"fmla v11.4s, v8.4s, v3.s[2] \n"
"fmla v12.4s, v9.4s, v1.s[3] \n"
"fmla v13.4s, v9.4s, v3.s[3] \n"
"shll v6.4s, %18.4h, #16 \n"
"shll2 v7.4s, %18.8h, #16 \n"
"shll v8.4s, %19.4h, #16 \n"
"shll2 v9.4s, %19.8h, #16 \n"
"fmla v10.4s, v6.4s, v2.s[0] \n"
"fmla v11.4s, v6.4s, v4.s[0] \n"
"fmla v12.4s, v7.4s, v2.s[1] \n"
"fmla v13.4s, v7.4s, v4.s[1] \n"
"fmla v10.4s, v8.4s, v2.s[2] \n"
"fmla v11.4s, v8.4s, v4.s[2] \n"
"fmla v12.4s, v9.4s, v2.s[3] \n"
"fmla v13.4s, v9.4s, v4.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r20 r21 r22 r23
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v6.4s, %20.4h, #16 \n"
"shll2 v7.4s, %20.8h, #16 \n"
"shll v8.4s, %21.4h, #16 \n"
"shll2 v9.4s, %21.8h, #16 \n"
"fmla v10.4s, v6.4s, v0.s[0] \n"
"fmla v11.4s, v6.4s, v2.s[0] \n"
"fmla v12.4s, v7.4s, v0.s[1] \n"
"fmla v13.4s, v7.4s, v2.s[1] \n"
"fmla v10.4s, v8.4s, v0.s[2] \n"
"fmla v11.4s, v8.4s, v2.s[2] \n"
"fmla v12.4s, v9.4s, v0.s[3] \n"
"fmla v13.4s, v9.4s, v2.s[3] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3] \n" // r24
"shll v4.4s, v4.4h, #16 \n"
"shll v6.4s, %22.4h, #16 \n"
"shll2 v7.4s, %22.8h, #16 \n"
"shll v8.4s, %23.4h, #16 \n"
"shll2 v9.4s, %23.8h, #16 \n"
"fmla v10.4s, v6.4s, v1.s[0] \n"
"fmla v11.4s, v6.4s, v3.s[0] \n"
"fmla v12.4s, v7.4s, v1.s[1] \n"
"fmla v13.4s, v7.4s, v3.s[1] \n"
"fmla v10.4s, v8.4s, v1.s[2] \n"
"fmla v11.4s, v8.4s, v3.s[2] \n"
"fmla v12.4s, v9.4s, v1.s[3] \n"
"fmla v13.4s, v9.4s, v3.s[3] \n"
"shll v6.4s, %24.4h, #16 \n"
"shll2 v7.4s, %24.8h, #16 \n"
"shll v8.4s, %25.4h, #16 \n"
"shll2 v9.4s, %25.8h, #16 \n"
"fmla v10.4s, v6.4s, v2.s[0] \n"
"fmla v11.4s, v6.4s, v4.s[0] \n"
"fmla v12.4s, v7.4s, v2.s[1] \n"
"fmla v13.4s, v7.4s, v4.s[1] \n"
"fmla v10.4s, v8.4s, v2.s[2] \n"
"fmla v11.4s, v8.4s, v4.s[2] \n"
"fmla v12.4s, v9.4s, v2.s[3] \n"
"fmla v13.4s, v9.4s, v4.s[3] \n"
"fadd v12.4s, v10.4s, v12.4s \n"
"fadd v13.4s, v11.4s, v13.4s \n"
"st1 {v12.4s, v13.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_01), // %8
"w"(_k00_23), // %9
"w"(_k01_01), // %10
"w"(_k01_23), // %11
"w"(_k02_01), // %12
"w"(_k02_23), // %13
"w"(_k10_01), // %14
"w"(_k10_23), // %15
"w"(_k11_01), // %16
"w"(_k11_23), // %17
"w"(_k12_01), // %18
"w"(_k12_23), // %19
"w"(_k20_01), // %20
"w"(_k20_23), // %21
"w"(_k21_01), // %22
"w"(_k21_23), // %23
"w"(_k22_01), // %24
"w"(_k22_23) // %25
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
#else // __aarch64__
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d28-d31}, [%0 :128] \n" // sum0 sum1
"pld [%1, #256] \n"
"vld1.u16 {d4-d7}, [%1 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmul.f32 q12, q8, d0[0] \n"
"vmul.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%1, #64] \n"
"vld1.f32 {d9}, [%1 :64] \n" // r04
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%2, #64] \n"
"vld1.f32 {d9}, [%2 :64] \n" // r14
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%3, #64] \n"
"vld1.f32 {d9}, [%3 :64] \n" // r24
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
// "pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"vadd.f32 q14, q12, q14 \n"
"vadd.f32 q15, q13, q15 \n"
"sub %4, %4, #256 \n" // kptr -= 8 * 16;
"vst1.f32 {d28-d31}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
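// 1-wide tail: the nine taps are spread across four partial sums
// (v10-v13 here, q12-q15 on armv7) and reduced with fadd before the store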
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v13.4s}, [%0] \n" // sum0
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v0.4h, v1.4h, v2.4h}, [%1] \n" // r00 r01 r02
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v6.4s, %8.4h, #16 \n"
"shll2 v7.4s, %8.8h, #16 \n"
"fmul v10.4s, v6.4s, v0.s[0] \n"
"fmul v11.4s, v7.4s, v0.s[1] \n"
"shll v8.4s, %9.4h, #16 \n"
"shll2 v9.4s, %9.8h, #16 \n"
"fmul v12.4s, v8.4s, v0.s[2] \n"
"fmla v13.4s, v9.4s, v0.s[3] \n"
"shll v6.4s, %10.4h, #16 \n"
"shll2 v7.4s, %10.8h, #16 \n"
"fmla v10.4s, v6.4s, v1.s[0] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"shll v8.4s, %11.4h, #16 \n"
"shll2 v9.4s, %11.8h, #16 \n"
"fmla v12.4s, v8.4s, v1.s[2] \n"
"fmla v13.4s, v9.4s, v1.s[3] \n"
"shll v6.4s, %12.4h, #16 \n"
"shll2 v7.4s, %12.8h, #16 \n"
"fmla v10.4s, v6.4s, v2.s[0] \n"
"fmla v11.4s, v7.4s, v2.s[1] \n"
"shll v8.4s, %13.4h, #16 \n"
"shll2 v9.4s, %13.8h, #16 \n"
"fmla v12.4s, v8.4s, v2.s[2] \n"
"fmla v13.4s, v9.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v3.4h, v4.4h, v5.4h}, [%2] \n" // r10 r11 r12
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, %14.4h, #16 \n"
"shll2 v7.4s, %14.8h, #16 \n"
"fmla v10.4s, v6.4s, v3.s[0] \n"
"fmla v11.4s, v7.4s, v3.s[1] \n"
"shll v8.4s, %15.4h, #16 \n"
"shll2 v9.4s, %15.8h, #16 \n"
"fmla v12.4s, v8.4s, v3.s[2] \n"
"fmla v13.4s, v9.4s, v3.s[3] \n"
"shll v6.4s, %16.4h, #16 \n"
"shll2 v7.4s, %16.8h, #16 \n"
"fmla v10.4s, v6.4s, v4.s[0] \n"
"fmla v11.4s, v7.4s, v4.s[1] \n"
"shll v8.4s, %17.4h, #16 \n"
"shll2 v9.4s, %17.8h, #16 \n"
"fmla v12.4s, v8.4s, v4.s[2] \n"
"fmla v13.4s, v9.4s, v4.s[3] \n"
"shll v6.4s, %18.4h, #16 \n"
"shll2 v7.4s, %18.8h, #16 \n"
"fmla v10.4s, v6.4s, v5.s[0] \n"
"fmla v11.4s, v7.4s, v5.s[1] \n"
"shll v8.4s, %19.4h, #16 \n"
"shll2 v9.4s, %19.8h, #16 \n"
"fmla v12.4s, v8.4s, v5.s[2] \n"
"fmla v13.4s, v9.4s, v5.s[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v0.4h, v1.4h, v2.4h}, [%3] \n" // r20 r21 r22
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v6.4s, %20.4h, #16 \n"
"shll2 v7.4s, %20.8h, #16 \n"
"fmla v10.4s, v6.4s, v0.s[0] \n"
"fmla v11.4s, v7.4s, v0.s[1] \n"
"shll v8.4s, %21.4h, #16 \n"
"shll2 v9.4s, %21.8h, #16 \n"
"fmla v12.4s, v8.4s, v0.s[2] \n"
"fmla v13.4s, v9.4s, v0.s[3] \n"
"shll v6.4s, %22.4h, #16 \n"
"shll2 v7.4s, %22.8h, #16 \n"
"fmla v10.4s, v6.4s, v1.s[0] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"shll v8.4s, %23.4h, #16 \n"
"shll2 v9.4s, %23.8h, #16 \n"
"fmla v12.4s, v8.4s, v1.s[2] \n"
"fmla v13.4s, v9.4s, v1.s[3] \n"
"shll v6.4s, %24.4h, #16 \n"
"shll2 v7.4s, %24.8h, #16 \n"
"fmla v10.4s, v6.4s, v2.s[0] \n"
"fmla v11.4s, v7.4s, v2.s[1] \n"
"shll v8.4s, %25.4h, #16 \n"
"shll2 v9.4s, %25.8h, #16 \n"
"fmla v12.4s, v8.4s, v2.s[2] \n"
"fmla v13.4s, v9.4s, v2.s[3] \n"
"fadd v11.4s, v10.4s, v11.4s \n"
"add %1, %1, #16 \n"
"fadd v13.4s, v12.4s, v13.4s \n"
"add %2, %2, #16 \n"
"fadd v13.4s, v11.4s, v13.4s \n"
"add %3, %3, #16 \n"
"st1 {v13.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_01), // %8
"w"(_k00_23), // %9
"w"(_k01_01), // %10
"w"(_k01_23), // %11
"w"(_k02_01), // %12
"w"(_k02_23), // %13
"w"(_k10_01), // %14
"w"(_k10_23), // %15
"w"(_k11_01), // %16
"w"(_k11_23), // %17
"w"(_k12_01), // %18
"w"(_k12_23), // %19
"w"(_k20_01), // %20
"w"(_k20_23), // %21
"w"(_k21_01), // %22
"w"(_k21_23), // %23
"w"(_k22_01), // %24
"w"(_k22_23) // %25
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
#else // __aarch64__
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d30-d31}, [%0 :128] \n" // sum0
"pld [%1, #192] \n"
"vld1.u16 {d2-d4}, [%1 :64] \n" // r00 r01 r02
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q2, d4, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmul.f32 q12, q8, d0[0] \n"
"vmul.f32 q13, q9, d0[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmul.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q11, d3[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%2, #192] \n"
"vld1.u16 {d2-d4}, [%2 :64] \n" // r10 r11 r12
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q2, d4, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q9, d0[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q11, d3[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%3, #192] \n"
"vld1.u16 {d2-d4}, [%3 :64] \n" // r20 r21 r22
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q2, d4, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q9, d0[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q11, d3[1] \n"
// "pld [%4, #256] \n"
"vld1.u16 {d20-d23}, [%4 :128] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q11, d5[1] \n"
"add %1, %1, #16 \n"
"vadd.f32 q13, q12, q13 \n"
"add %2, %2, #16 \n"
"vadd.f32 q15, q14, q15 \n"
"add %3, %3, #16 \n"
"vadd.f32 q15, q13, q15 \n"
"sub %4, %4, #256 \n" // kptr -= 8 * 16 * 2;
"vst1.f32 {d30-d31}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
for (; q < inch; q++)
{
unsigned short* outptr0_bf16 = top_blob.channel(p);
const float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p).row<const unsigned short>(q);
#if __aarch64__
// 16 * 9
uint16x8_t _k00_01 = vld1q_u16(kptr);
uint16x8_t _k00_23 = vld1q_u16(kptr + 8);
uint16x8_t _k01_01 = vld1q_u16(kptr + 16);
uint16x8_t _k01_23 = vld1q_u16(kptr + 24);
uint16x8_t _k02_01 = vld1q_u16(kptr + 32);
uint16x8_t _k02_23 = vld1q_u16(kptr + 40);
uint16x8_t _k10_01 = vld1q_u16(kptr + 48);
uint16x8_t _k10_23 = vld1q_u16(kptr + 56);
uint16x8_t _k11_01 = vld1q_u16(kptr + 64);
uint16x8_t _k11_23 = vld1q_u16(kptr + 72);
uint16x8_t _k12_01 = vld1q_u16(kptr + 80);
uint16x8_t _k12_23 = vld1q_u16(kptr + 88);
uint16x8_t _k20_01 = vld1q_u16(kptr + 96);
uint16x8_t _k20_23 = vld1q_u16(kptr + 104);
uint16x8_t _k21_01 = vld1q_u16(kptr + 112);
uint16x8_t _k21_23 = vld1q_u16(kptr + 120);
uint16x8_t _k22_01 = vld1q_u16(kptr + 128);
uint16x8_t _k22_23 = vld1q_u16(kptr + 136);
#endif // __aarch64__
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%1], #64 \n" // sum0 sum1 sum2 sum3
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r00 r01 r02 r03
"shll v0.4s, v4.4h, #16 \n"
"shll2 v1.4s, v4.8h, #16 \n"
"shll v2.4s, v5.4h, #16 \n"
"shll2 v3.4s, v5.8h, #16 \n"
"shll v4.4s, v6.4h, #16 \n"
"shll2 v5.4s, v6.8h, #16 \n"
"shll v6.4s, v7.4h, #16 \n"
"shll2 v7.4s, v7.8h, #16 \n"
"shll v8.4s, %10.4h, #16 \n"
"shll2 v9.4s, %10.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[0] \n"
"fmla v11.4s, v8.4s, v2.s[0] \n"
"fmla v12.4s, v8.4s, v4.s[0] \n"
"fmla v13.4s, v8.4s, v6.s[0] \n"
"fmla v10.4s, v9.4s, v0.s[1] \n"
"fmla v11.4s, v9.4s, v2.s[1] \n"
"fmla v12.4s, v9.4s, v4.s[1] \n"
"fmla v13.4s, v9.4s, v6.s[1] \n"
"shll v8.4s, %11.4h, #16 \n"
"shll2 v9.4s, %11.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[2] \n"
"fmla v11.4s, v8.4s, v2.s[2] \n"
"fmla v12.4s, v8.4s, v4.s[2] \n"
"fmla v13.4s, v8.4s, v6.s[2] \n"
"fmla v10.4s, v9.4s, v0.s[3] \n"
"fmla v11.4s, v9.4s, v2.s[3] \n"
"fmla v12.4s, v9.4s, v4.s[3] \n"
"fmla v13.4s, v9.4s, v6.s[3] \n"
"shll v8.4s, %12.4h, #16 \n"
"shll2 v9.4s, %12.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[0] \n"
"fmla v11.4s, v8.4s, v3.s[0] \n"
"fmla v12.4s, v8.4s, v5.s[0] \n"
"fmla v13.4s, v8.4s, v7.s[0] \n"
"fmla v10.4s, v9.4s, v1.s[1] \n"
"fmla v11.4s, v9.4s, v3.s[1] \n"
"fmla v12.4s, v9.4s, v5.s[1] \n"
"fmla v13.4s, v9.4s, v7.s[1] \n"
"shll v8.4s, %13.4h, #16 \n"
"shll2 v9.4s, %13.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[2] \n"
"fmla v11.4s, v8.4s, v3.s[2] \n"
"fmla v12.4s, v8.4s, v5.s[2] \n"
"fmla v13.4s, v8.4s, v7.s[2] \n"
"fmla v10.4s, v9.4s, v1.s[3] \n"
"fmla v11.4s, v9.4s, v3.s[3] \n"
"fmla v12.4s, v9.4s, v5.s[3] \n"
"fmla v13.4s, v9.4s, v7.s[3] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2] \n" // r08
"shll v0.4s, v0.4h, #16 \n"
"shll v8.4s, %14.4h, #16 \n"
"shll2 v9.4s, %14.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[0] \n"
"fmla v11.4s, v8.4s, v4.s[0] \n"
"fmla v12.4s, v8.4s, v6.s[0] \n"
"fmla v13.4s, v8.4s, v0.s[0] \n"
"fmla v10.4s, v9.4s, v2.s[1] \n"
"fmla v11.4s, v9.4s, v4.s[1] \n"
"fmla v12.4s, v9.4s, v6.s[1] \n"
"fmla v13.4s, v9.4s, v0.s[1] \n"
"shll v8.4s, %15.4h, #16 \n"
"shll2 v9.4s, %15.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[2] \n"
"fmla v11.4s, v8.4s, v4.s[2] \n"
"fmla v12.4s, v8.4s, v6.s[2] \n"
"fmla v13.4s, v8.4s, v0.s[2] \n"
"fmla v10.4s, v9.4s, v2.s[3] \n"
"fmla v11.4s, v9.4s, v4.s[3] \n"
"fmla v12.4s, v9.4s, v6.s[3] \n"
"fmla v13.4s, v9.4s, v0.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" // r10 r11 r12 r13
"shll v0.4s, v4.4h, #16 \n"
"shll2 v1.4s, v4.8h, #16 \n"
"shll v2.4s, v5.4h, #16 \n"
"shll2 v3.4s, v5.8h, #16 \n"
"shll v4.4s, v6.4h, #16 \n"
"shll2 v5.4s, v6.8h, #16 \n"
"shll v6.4s, v7.4h, #16 \n"
"shll2 v7.4s, v7.8h, #16 \n"
"shll v8.4s, %16.4h, #16 \n"
"shll2 v9.4s, %16.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[0] \n"
"fmla v11.4s, v8.4s, v2.s[0] \n"
"fmla v12.4s, v8.4s, v4.s[0] \n"
"fmla v13.4s, v8.4s, v6.s[0] \n"
"fmla v10.4s, v9.4s, v0.s[1] \n"
"fmla v11.4s, v9.4s, v2.s[1] \n"
"fmla v12.4s, v9.4s, v4.s[1] \n"
"fmla v13.4s, v9.4s, v6.s[1] \n"
"shll v8.4s, %17.4h, #16 \n"
"shll2 v9.4s, %17.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[2] \n"
"fmla v11.4s, v8.4s, v2.s[2] \n"
"fmla v12.4s, v8.4s, v4.s[2] \n"
"fmla v13.4s, v8.4s, v6.s[2] \n"
"fmla v10.4s, v9.4s, v0.s[3] \n"
"fmla v11.4s, v9.4s, v2.s[3] \n"
"fmla v12.4s, v9.4s, v4.s[3] \n"
"fmla v13.4s, v9.4s, v6.s[3] \n"
"shll v8.4s, %18.4h, #16 \n"
"shll2 v9.4s, %18.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[0] \n"
"fmla v11.4s, v8.4s, v3.s[0] \n"
"fmla v12.4s, v8.4s, v5.s[0] \n"
"fmla v13.4s, v8.4s, v7.s[0] \n"
"fmla v10.4s, v9.4s, v1.s[1] \n"
"fmla v11.4s, v9.4s, v3.s[1] \n"
"fmla v12.4s, v9.4s, v5.s[1] \n"
"fmla v13.4s, v9.4s, v7.s[1] \n"
"shll v8.4s, %19.4h, #16 \n"
"shll2 v9.4s, %19.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[2] \n"
"fmla v11.4s, v8.4s, v3.s[2] \n"
"fmla v12.4s, v8.4s, v5.s[2] \n"
"fmla v13.4s, v8.4s, v7.s[2] \n"
"fmla v10.4s, v9.4s, v1.s[3] \n"
"fmla v11.4s, v9.4s, v3.s[3] \n"
"fmla v12.4s, v9.4s, v5.s[3] \n"
"fmla v13.4s, v9.4s, v7.s[3] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3] \n" // r18
"shll v0.4s, v0.4h, #16 \n"
"shll v8.4s, %20.4h, #16 \n"
"shll2 v9.4s, %20.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[0] \n"
"fmla v11.4s, v8.4s, v4.s[0] \n"
"fmla v12.4s, v8.4s, v6.s[0] \n"
"fmla v13.4s, v8.4s, v0.s[0] \n"
"fmla v10.4s, v9.4s, v2.s[1] \n"
"fmla v11.4s, v9.4s, v4.s[1] \n"
"fmla v12.4s, v9.4s, v6.s[1] \n"
"fmla v13.4s, v9.4s, v0.s[1] \n"
"shll v8.4s, %21.4h, #16 \n"
"shll2 v9.4s, %21.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[2] \n"
"fmla v11.4s, v8.4s, v4.s[2] \n"
"fmla v12.4s, v8.4s, v6.s[2] \n"
"fmla v13.4s, v8.4s, v0.s[2] \n"
"fmla v10.4s, v9.4s, v2.s[3] \n"
"fmla v11.4s, v9.4s, v4.s[3] \n"
"fmla v12.4s, v9.4s, v6.s[3] \n"
"fmla v13.4s, v9.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // r20 r21 r22 r23
"shll v0.4s, v4.4h, #16 \n"
"shll2 v1.4s, v4.8h, #16 \n"
"shll v2.4s, v5.4h, #16 \n"
"shll2 v3.4s, v5.8h, #16 \n"
"shll v4.4s, v6.4h, #16 \n"
"shll2 v5.4s, v6.8h, #16 \n"
"shll v6.4s, v7.4h, #16 \n"
"shll2 v7.4s, v7.8h, #16 \n"
"shll v8.4s, %22.4h, #16 \n"
"shll2 v9.4s, %22.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[0] \n"
"fmla v11.4s, v8.4s, v2.s[0] \n"
"fmla v12.4s, v8.4s, v4.s[0] \n"
"fmla v13.4s, v8.4s, v6.s[0] \n"
"fmla v10.4s, v9.4s, v0.s[1] \n"
"fmla v11.4s, v9.4s, v2.s[1] \n"
"fmla v12.4s, v9.4s, v4.s[1] \n"
"fmla v13.4s, v9.4s, v6.s[1] \n"
"shll v8.4s, %23.4h, #16 \n"
"shll2 v9.4s, %23.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[2] \n"
"fmla v11.4s, v8.4s, v2.s[2] \n"
"fmla v12.4s, v8.4s, v4.s[2] \n"
"fmla v13.4s, v8.4s, v6.s[2] \n"
"fmla v10.4s, v9.4s, v0.s[3] \n"
"fmla v11.4s, v9.4s, v2.s[3] \n"
"fmla v12.4s, v9.4s, v4.s[3] \n"
"fmla v13.4s, v9.4s, v6.s[3] \n"
"shll v8.4s, %24.4h, #16 \n"
"shll2 v9.4s, %24.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[0] \n"
"fmla v11.4s, v8.4s, v3.s[0] \n"
"fmla v12.4s, v8.4s, v5.s[0] \n"
"fmla v13.4s, v8.4s, v7.s[0] \n"
"fmla v10.4s, v9.4s, v1.s[1] \n"
"fmla v11.4s, v9.4s, v3.s[1] \n"
"fmla v12.4s, v9.4s, v5.s[1] \n"
"fmla v13.4s, v9.4s, v7.s[1] \n"
"shll v8.4s, %25.4h, #16 \n"
"shll2 v9.4s, %25.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[2] \n"
"fmla v11.4s, v8.4s, v3.s[2] \n"
"fmla v12.4s, v8.4s, v5.s[2] \n"
"fmla v13.4s, v8.4s, v7.s[2] \n"
"fmla v10.4s, v9.4s, v1.s[3] \n"
"fmla v11.4s, v9.4s, v3.s[3] \n"
"fmla v12.4s, v9.4s, v5.s[3] \n"
"fmla v13.4s, v9.4s, v7.s[3] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v0.4h}, [%4] \n" // r28
"shll v0.4s, v0.4h, #16 \n"
"shll v8.4s, %26.4h, #16 \n"
"shll2 v9.4s, %26.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[0] \n"
"fmla v11.4s, v8.4s, v4.s[0] \n"
"fmla v12.4s, v8.4s, v6.s[0] \n"
"fmla v13.4s, v8.4s, v0.s[0] \n"
"fmla v10.4s, v9.4s, v2.s[1] \n"
"fmla v11.4s, v9.4s, v4.s[1] \n"
"fmla v12.4s, v9.4s, v6.s[1] \n"
"fmla v13.4s, v9.4s, v0.s[1] \n"
"shll v8.4s, %27.4h, #16 \n"
"shll2 v9.4s, %27.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[2] \n"
"fmla v11.4s, v8.4s, v4.s[2] \n"
"fmla v12.4s, v8.4s, v6.s[2] \n"
"fmla v13.4s, v8.4s, v0.s[2] \n"
"fmla v10.4s, v9.4s, v2.s[3] \n"
"fmla v11.4s, v9.4s, v4.s[3] \n"
"fmla v12.4s, v9.4s, v6.s[3] \n"
"fmla v13.4s, v9.4s, v0.s[3] \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"st1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%0], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00_01), // %10
"w"(_k00_23), // %11
"w"(_k01_01), // %12
"w"(_k01_23), // %13
"w"(_k02_01), // %14
"w"(_k02_23), // %15
"w"(_k10_01), // %16
"w"(_k10_23), // %17
"w"(_k11_01), // %18
"w"(_k11_23), // %19
"w"(_k12_01), // %20
"w"(_k12_23), // %21
"w"(_k20_01), // %22
"w"(_k20_23), // %23
"w"(_k21_01), // %24
"w"(_k21_23), // %25
"w"(_k22_01), // %26
"w"(_k22_23) // %27
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
#else // __aarch64__
asm volatile(
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n" // sum0 sum1 sum2 sum3
"pld [%2, #512] \n"
"vldm %2!, {d8-d15} \n" // r00 r01 r02 r03 r04 r05 r06 r07
"vshll.u16 q0, d8, #16 \n"
"vshll.u16 q1, d9, #16 \n"
"vshll.u16 q2, d10, #16 \n"
"vshll.u16 q3, d11, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%2, #64] \n"
"vld1.f32 {d1}, [%2 :64] \n" // r08
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n" // r10 r11 r12 r13 r14 r15 r16 r17
"vshll.u16 q0, d8, #16 \n"
"vshll.u16 q1, d9, #16 \n"
"vshll.u16 q2, d10, #16 \n"
"vshll.u16 q3, d11, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%3, #64] \n"
"vld1.f32 {d1}, [%3 :64] \n" // r18
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%4, #256] \n"
"vldm %4!, {d8-d15} \n" // r20 r21 r22 r23 r24 r25 r26 r27
"vshll.u16 q0, d8, #16 \n"
"vshll.u16 q1, d9, #16 \n"
"vshll.u16 q2, d10, #16 \n"
"vshll.u16 q3, d11, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%4, #64] \n"
"vld1.f32 {d1}, [%4 :64] \n" // r28
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
// "pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"sub %5, %5, #256 \n" // kptr -= 8 * 16;
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vshrn.u32 d26, q14, #16 \n"
"vshrn.u32 d27, q15, #16 \n"
"vst1.f32 {d24-d27}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(kptr) // %5
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v12.4s, v13.4s}, [%1], #32 \n" // sum0 sum1
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r00 r01 r02 r03
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v6.4s, %10.4h, #16 \n"
"shll2 v7.4s, %10.8h, #16 \n"
"fmul v10.4s, v6.4s, v0.s[0] \n"
"fmul v11.4s, v6.4s, v2.s[0] \n"
"fmla v12.4s, v7.4s, v0.s[1] \n"
"fmla v13.4s, v7.4s, v2.s[1] \n"
"shll v8.4s, %11.4h, #16 \n"
"shll2 v9.4s, %11.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[2] \n"
"fmla v11.4s, v8.4s, v2.s[2] \n"
"fmla v12.4s, v9.4s, v0.s[3] \n"
"fmla v13.4s, v9.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v4.4h}, [%2] \n" // r04
"shll v4.4s, v4.4h, #16 \n"
"shll v6.4s, %12.4h, #16 \n"
"shll2 v7.4s, %12.8h, #16 \n"
"fmla v10.4s, v6.4s, v1.s[0] \n"
"fmla v11.4s, v6.4s, v3.s[0] \n"
"fmla v12.4s, v7.4s, v1.s[1] \n"
"fmla v13.4s, v7.4s, v3.s[1] \n"
"shll v8.4s, %13.4h, #16 \n"
"shll2 v9.4s, %13.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[2] \n"
"fmla v11.4s, v8.4s, v3.s[2] \n"
"fmla v12.4s, v9.4s, v1.s[3] \n"
"fmla v13.4s, v9.4s, v3.s[3] \n"
"shll v6.4s, %14.4h, #16 \n"
"shll2 v7.4s, %14.8h, #16 \n"
"fmla v10.4s, v6.4s, v2.s[0] \n"
"fmla v11.4s, v6.4s, v4.s[0] \n"
"fmla v12.4s, v7.4s, v2.s[1] \n"
"fmla v13.4s, v7.4s, v4.s[1] \n"
"shll v8.4s, %15.4h, #16 \n"
"shll2 v9.4s, %15.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[2] \n"
"fmla v11.4s, v8.4s, v4.s[2] \n"
"fmla v12.4s, v9.4s, v2.s[3] \n"
"fmla v13.4s, v9.4s, v4.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r10 r11 r12 r13
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v6.4s, %16.4h, #16 \n"
"shll2 v7.4s, %16.8h, #16 \n"
"fmla v10.4s, v6.4s, v0.s[0] \n"
"fmla v11.4s, v6.4s, v2.s[0] \n"
"fmla v12.4s, v7.4s, v0.s[1] \n"
"fmla v13.4s, v7.4s, v2.s[1] \n"
"shll v8.4s, %17.4h, #16 \n"
"shll2 v9.4s, %17.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[2] \n"
"fmla v11.4s, v8.4s, v2.s[2] \n"
"fmla v12.4s, v9.4s, v0.s[3] \n"
"fmla v13.4s, v9.4s, v2.s[3] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3] \n" // r14
"shll v4.4s, v4.4h, #16 \n"
"shll v6.4s, %18.4h, #16 \n"
"shll2 v7.4s, %18.8h, #16 \n"
"fmla v10.4s, v6.4s, v1.s[0] \n"
"fmla v11.4s, v6.4s, v3.s[0] \n"
"fmla v12.4s, v7.4s, v1.s[1] \n"
"fmla v13.4s, v7.4s, v3.s[1] \n"
"shll v8.4s, %19.4h, #16 \n"
"shll2 v9.4s, %19.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[2] \n"
"fmla v11.4s, v8.4s, v3.s[2] \n"
"fmla v12.4s, v9.4s, v1.s[3] \n"
"fmla v13.4s, v9.4s, v3.s[3] \n"
"shll v6.4s, %20.4h, #16 \n"
"shll2 v7.4s, %20.8h, #16 \n"
"fmla v10.4s, v6.4s, v2.s[0] \n"
"fmla v11.4s, v6.4s, v4.s[0] \n"
"fmla v12.4s, v7.4s, v2.s[1] \n"
"fmla v13.4s, v7.4s, v4.s[1] \n"
"shll v8.4s, %21.4h, #16 \n"
"shll2 v9.4s, %21.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[2] \n"
"fmla v11.4s, v8.4s, v4.s[2] \n"
"fmla v12.4s, v9.4s, v2.s[3] \n"
"fmla v13.4s, v9.4s, v4.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r20 r21 r22 r23
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v6.4s, %22.4h, #16 \n"
"shll2 v7.4s, %22.8h, #16 \n"
"fmla v10.4s, v6.4s, v0.s[0] \n"
"fmla v11.4s, v6.4s, v2.s[0] \n"
"fmla v12.4s, v7.4s, v0.s[1] \n"
"fmla v13.4s, v7.4s, v2.s[1] \n"
"shll v8.4s, %23.4h, #16 \n"
"shll2 v9.4s, %23.8h, #16 \n"
"fmla v10.4s, v8.4s, v0.s[2] \n"
"fmla v11.4s, v8.4s, v2.s[2] \n"
"fmla v12.4s, v9.4s, v0.s[3] \n"
"fmla v13.4s, v9.4s, v2.s[3] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v4.4h}, [%4] \n" // r24
"shll v4.4s, v4.4h, #16 \n"
"shll v6.4s, %24.4h, #16 \n"
"shll2 v7.4s, %24.8h, #16 \n"
"fmla v10.4s, v6.4s, v1.s[0] \n"
"fmla v11.4s, v6.4s, v3.s[0] \n"
"fmla v12.4s, v7.4s, v1.s[1] \n"
"fmla v13.4s, v7.4s, v3.s[1] \n"
"shll v8.4s, %25.4h, #16 \n"
"shll2 v9.4s, %25.8h, #16 \n"
"fmla v10.4s, v8.4s, v1.s[2] \n"
"fmla v11.4s, v8.4s, v3.s[2] \n"
"fmla v12.4s, v9.4s, v1.s[3] \n"
"fmla v13.4s, v9.4s, v3.s[3] \n"
"shll v6.4s, %26.4h, #16 \n"
"shll2 v7.4s, %26.8h, #16 \n"
"fmla v10.4s, v6.4s, v2.s[0] \n"
"fmla v11.4s, v6.4s, v4.s[0] \n"
"fmla v12.4s, v7.4s, v2.s[1] \n"
"fmla v13.4s, v7.4s, v4.s[1] \n"
"shll v8.4s, %27.4h, #16 \n"
"shll2 v9.4s, %27.8h, #16 \n"
"fmla v10.4s, v8.4s, v2.s[2] \n"
"fmla v11.4s, v8.4s, v4.s[2] \n"
"fmla v12.4s, v9.4s, v2.s[3] \n"
"fmla v13.4s, v9.4s, v4.s[3] \n"
"fadd v12.4s, v10.4s, v12.4s \n"
"fadd v13.4s, v11.4s, v13.4s \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"st1 {v12.4h, v13.4h}, [%0], #16 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00_01), // %10
"w"(_k00_23), // %11
"w"(_k01_01), // %12
"w"(_k01_23), // %13
"w"(_k02_01), // %14
"w"(_k02_23), // %15
"w"(_k10_01), // %16
"w"(_k10_23), // %17
"w"(_k11_01), // %18
"w"(_k11_23), // %19
"w"(_k12_01), // %20
"w"(_k12_23), // %21
"w"(_k20_01), // %22
"w"(_k20_23), // %23
"w"(_k21_01), // %24
"w"(_k21_23), // %25
"w"(_k22_01), // %26
"w"(_k22_23) // %27
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
#else // __aarch64__
asm volatile(
"pld [%1, #256] \n"
"vld1.f32 {d28-d31}, [%1 :128]! \n" // sum0 sum1
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmul.f32 q12, q8, d0[0] \n"
"vmul.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%2, #64] \n"
"vld1.f32 {d9}, [%2 :64] \n" // r04
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%3, #64] \n"
"vld1.f32 {d9}, [%3 :64] \n" // r14
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%4, #64] \n"
"vld1.f32 {d9}, [%4 :64] \n" // r24
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
// "pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"vadd.f32 q14, q12, q14 \n"
"vadd.f32 q15, q13, q15 \n"
"sub %5, %5, #256 \n" // kptr -= 8 * 16;
"vshrn.u32 d28, q14, #16 \n"
"vshrn.u32 d29, q15, #16 \n"
"vst1.f32 {d28-d29}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(kptr) // %5
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v13.4s}, [%1], #16 \n" // sum0
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v0.4h, v1.4h, v2.4h}, [%2] \n" // r00 r01 r02
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v6.4s, %10.4h, #16 \n"
"shll2 v7.4s, %10.8h, #16 \n"
"fmul v10.4s, v6.4s, v0.s[0] \n"
"fmul v11.4s, v7.4s, v0.s[1] \n"
"shll v8.4s, %11.4h, #16 \n"
"shll2 v9.4s, %11.8h, #16 \n"
"fmul v12.4s, v8.4s, v0.s[2] \n"
"fmla v13.4s, v9.4s, v0.s[3] \n"
"shll v6.4s, %12.4h, #16 \n"
"shll2 v7.4s, %12.8h, #16 \n"
"fmla v10.4s, v6.4s, v1.s[0] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"shll v8.4s, %13.4h, #16 \n"
"shll2 v9.4s, %13.8h, #16 \n"
"fmla v12.4s, v8.4s, v1.s[2] \n"
"fmla v13.4s, v9.4s, v1.s[3] \n"
"shll v6.4s, %14.4h, #16 \n"
"shll2 v7.4s, %14.8h, #16 \n"
"fmla v10.4s, v6.4s, v2.s[0] \n"
"fmla v11.4s, v7.4s, v2.s[1] \n"
"shll v8.4s, %15.4h, #16 \n"
"shll2 v9.4s, %15.8h, #16 \n"
"fmla v12.4s, v8.4s, v2.s[2] \n"
"fmla v13.4s, v9.4s, v2.s[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v3.4h, v4.4h, v5.4h}, [%3] \n" // r10 r11 r12
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, %16.4h, #16 \n"
"shll2 v7.4s, %16.8h, #16 \n"
"fmla v10.4s, v6.4s, v3.s[0] \n"
"fmla v11.4s, v7.4s, v3.s[1] \n"
"shll v8.4s, %17.4h, #16 \n"
"shll2 v9.4s, %17.8h, #16 \n"
"fmla v12.4s, v8.4s, v3.s[2] \n"
"fmla v13.4s, v9.4s, v3.s[3] \n"
"shll v6.4s, %18.4h, #16 \n"
"shll2 v7.4s, %18.8h, #16 \n"
"fmla v10.4s, v6.4s, v4.s[0] \n"
"fmla v11.4s, v7.4s, v4.s[1] \n"
"shll v8.4s, %19.4h, #16 \n"
"shll2 v9.4s, %19.8h, #16 \n"
"fmla v12.4s, v8.4s, v4.s[2] \n"
"fmla v13.4s, v9.4s, v4.s[3] \n"
"shll v6.4s, %20.4h, #16 \n"
"shll2 v7.4s, %20.8h, #16 \n"
"fmla v10.4s, v6.4s, v5.s[0] \n"
"fmla v11.4s, v7.4s, v5.s[1] \n"
"shll v8.4s, %21.4h, #16 \n"
"shll2 v9.4s, %21.8h, #16 \n"
"fmla v12.4s, v8.4s, v5.s[2] \n"
"fmla v13.4s, v9.4s, v5.s[3] \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v0.4h, v1.4h, v2.4h}, [%4] \n" // r20 r21 r22
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v6.4s, %22.4h, #16 \n"
"shll2 v7.4s, %22.8h, #16 \n"
"fmla v10.4s, v6.4s, v0.s[0] \n"
"fmla v11.4s, v7.4s, v0.s[1] \n"
"shll v8.4s, %23.4h, #16 \n"
"shll2 v9.4s, %23.8h, #16 \n"
"fmla v12.4s, v8.4s, v0.s[2] \n"
"fmla v13.4s, v9.4s, v0.s[3] \n"
"shll v6.4s, %24.4h, #16 \n"
"shll2 v7.4s, %24.8h, #16 \n"
"fmla v10.4s, v6.4s, v1.s[0] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"shll v8.4s, %25.4h, #16 \n"
"shll2 v9.4s, %25.8h, #16 \n"
"fmla v12.4s, v8.4s, v1.s[2] \n"
"fmla v13.4s, v9.4s, v1.s[3] \n"
"shll v6.4s, %26.4h, #16 \n"
"shll2 v7.4s, %26.8h, #16 \n"
"fmla v10.4s, v6.4s, v2.s[0] \n"
"fmla v11.4s, v7.4s, v2.s[1] \n"
"shll v8.4s, %27.4h, #16 \n"
"shll2 v9.4s, %27.8h, #16 \n"
"fmla v12.4s, v8.4s, v2.s[2] \n"
"fmla v13.4s, v9.4s, v2.s[3] \n"
"fadd v11.4s, v10.4s, v11.4s \n"
"add %2, %2, #16 \n"
"fadd v13.4s, v12.4s, v13.4s \n"
"add %3, %3, #16 \n"
"fadd v13.4s, v11.4s, v13.4s \n"
"add %4, %4, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"st1 {v13.4h}, [%0], #8 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00_01), // %10
"w"(_k00_23), // %11
"w"(_k01_01), // %12
"w"(_k01_23), // %13
"w"(_k02_01), // %14
"w"(_k02_23), // %15
"w"(_k10_01), // %16
"w"(_k10_23), // %17
"w"(_k11_01), // %18
"w"(_k11_23), // %19
"w"(_k12_01), // %20
"w"(_k12_23), // %21
"w"(_k20_01), // %22
"w"(_k20_23), // %23
"w"(_k21_01), // %24
"w"(_k21_23), // %25
"w"(_k22_01), // %26
"w"(_k22_23) // %27
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
#else // __aarch64__
asm volatile(
"pld [%1, #128] \n"
"vld1.f32 {d30-d31}, [%1 :128]! \n" // sum0
"pld [%2, #192] \n"
"vld1.u16 {d2-d4}, [%2 :64] \n" // r00 r01 r02
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q2, d4, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmul.f32 q12, q8, d0[0] \n"
"vmul.f32 q13, q9, d0[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmul.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q11, d3[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%3, #192] \n"
"vld1.u16 {d2-d4}, [%3 :64] \n" // r10 r11 r12
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q2, d4, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q9, d0[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q11, d3[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%4, #192] \n"
"vld1.u16 {d2-d4}, [%4 :64] \n" // r20 r21 r22
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q2, d4, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q9, d0[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q11, d3[1] \n"
// "pld [%5, #256] \n"
"vld1.u16 {d20-d23}, [%5 :128] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q11, d5[1] \n"
"add %2, %2, #16 \n"
"vadd.f32 q13, q12, q13 \n"
"add %3, %3, #16 \n"
"vadd.f32 q15, q14, q15 \n"
"add %4, %4, #16 \n"
"vadd.f32 q15, q13, q15 \n"
"sub %5, %5, #256 \n" // kptr -= 8 * 16 * 2;
"vshrn.u32 d31, q15, #16 \n"
"vst1.u16 {d31}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(kptr) // %5
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
}
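/* A minimal standalone sketch (illustrative, not part of the kernel above)
 of what the shll/vshll #16 and shrn/vshrn #16 pairs in these kernels do:
 a bfloat16 value is the upper 16 bits of an IEEE fp32, so widening is a
 left shift by 16 and narrowing is a truncating right shift by 16. */
#include <stdint.h>
#include <string.h>
static inline float bfloat16_to_fp32(uint16_t u)
{
uint32_t bits = (uint32_t)u << 16; /* same effect as shll #16 */
float f;
memcpy(&f, &bits, sizeof(f));
return f;
}
static inline uint16_t fp32_to_bfloat16(float f)
{
uint32_t bits;
memcpy(&bits, &f, sizeof(bits));
return (uint16_t)(bits >> 16); /* same effect as shrn #16 (truncation) */
}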
|
Integral.c | /*
OpenMP code to compute the integral of f(x) = 4.0/(1 + x^2) from 0 to 1.
Solving the integral numerically gives the value of Pi.
*/
#include <stdio.h>
#include <time.h>
int main(void){
// long num_steps = 1000000000;
long num_steps = 100000000;
double step;
double integral;
double sum = 0;
time_t start, end;
time(&start);
step = 1.0 / (double) num_steps;
// x is declared inside the loop so each thread gets its own copy, and sum
// is combined with a reduction instead of a per-iteration critical section,
// which would both race on a shared x and serialize the whole loop.
#pragma omp parallel for reduction(+:sum)
for (long i = 0; i < num_steps; i++){
double x = (i + 0.5) * step;
sum = sum + 4.0/(1.0 + (x*x));
}
integral = step * sum;
time(&end);
double time_taken = difftime(end, start);
printf("Integral: %f\n", integral);
printf("Elapsed time: %f\n", time_taken);
return 0;
}
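/* For reference, a hand-rolled equivalent of reduction(+:sum) (illustrative
 sketch, assuming OpenMP is enabled): each thread accumulates into a private
 local, and the critical section runs once per thread instead of once per
 iteration. */
int pi_manual_reduction(void)
{
long num_steps = 100000000;
double step = 1.0 / (double) num_steps;
double sum = 0;
#pragma omp parallel
{
double local = 0;
#pragma omp for
for (long i = 0; i < num_steps; i++) {
double x = (i + 0.5) * step;
local += 4.0 / (1.0 + x * x);
}
#pragma omp critical
sum += local;
}
printf("Integral: %f\n", step * sum);
return 0;
}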
|
GB_unop__identity_int8_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_uint16)
// op(A') function: GB (_unop_tran__identity_int8_uint16)
// C type: int8_t
// A type: uint16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = (int8_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int8_uint16)
(
int8_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
int8_t z = (int8_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
int8_t z = (int8_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int8_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
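/* Standalone illustration (hypothetical demo, not part of GraphBLAS): the
// int8_t cast used by this operator reduces uint16_t values modulo 256 on
// the usual two's-complement targets, so large inputs wrap rather than
// saturate. */
#include <stdio.h>
#include <stdint.h>
static void cast_demo (void)
{
uint16_t a [ ] = { 7, 127, 128, 300, 65535 } ;
for (int i = 0 ; i < 5 ; i++)
{
printf ("%u -> %d\n", (unsigned) a [i], (int) (int8_t) a [i]) ;
}
// prints: 7 -> 7, 127 -> 127, 128 -> -128, 300 -> 44, 65535 -> -1
}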
|
matrix.c |
#include "matrix.h"
/*
* matrix.c
*
* Copyright (c) 2014, Rafat Hussain
* License : BSD 3-Clause
* See COPYRIGHT for more details
*/
typedef struct {
float* a;
int b;
} vipair;
float macheps() {
float macheps;
macheps = 1.0;
while ((macheps + 1.0) > 1.0) {
macheps = macheps / 2.0f;
}
macheps = macheps * 2;
return macheps;
}
float pmax(float a, float b) {
if (a > b) {
return a;
}
else {
return b;
}
}
float pmin(float a, float b) {
if (a < b) {
return a;
}
else {
return b;
}
}
int imax(int a, int b) {
if (a > b) {
return a;
}
else {
return b;
}
}
int imin(int a, int b) {
if (a < b) {
return a;
}
else {
return b;
}
}
float signx(float x) {
float sgn;
if (x >= 0.) {
sgn = 1.0;
}
else {
sgn = -1.0;
}
return sgn;
}
float l2norm(float *vec, int N) {
float l2, sum;
int i;
sum = 0.;
for (i = 0; i < N; ++i) {
sum += vec[i] * vec[i];
}
l2 = sqrtf(sum);
return l2;
}
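/* qsort comparator over vipair: orders by the pointed-to float values in
 descending order, so sort1d fills pos[] from largest to smallest value. */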
int compare (const void* ind1, const void* ind2)
{
if (*((vipair *)ind1)->a > *((vipair *)ind2)->a)
return -1;
else if (*((vipair *)ind1)->a < *((vipair *)ind2)->a)
return 1;
else
return 0;
}
void sort1d(float* v,int N, int* pos)
{
vipair* val = NULL;
int i;
if (N <= 0)
return;
val = malloc(sizeof(vipair) * N);
for (i = 0; i < N; ++i) {
val[i].a = &v[i];
val[i].b = i;
}
qsort(val, N, sizeof(vipair), compare);
for (i = 0; i < N; ++i)
pos[i] = val[i].b;
free(val);
}
float array_max_abs(float *array,int N) {
int i;
float m = 0.0;
for (i = 0; i < N;++i) {
if (fabsf(array[i]) > m ) {
m = fabsf(array[i]);
}
}
return m;
}
float array_max(float *array,int N) {
int i;
float m = array[0];
for (i = 1; i < N;++i) {
if (array[i] > m ) {
m = array[i];
}
}
return m;
}
float array_min(float *array,int N) {
int i;
float m = array[0];
for (i = 1; i < N;++i) {
if (array[i] < m ) {
m = array[i];
}
}
return m;
}
void dtranspose(float *sig, int rows, int cols,float *col) {
int max,ud,i,k;
if (rows >= cols) {
max = cols;
} else {
max = rows;
}
ud = 0;
for (i= -rows + 1; i < cols; i++) {
if (i <= 0) {
ud++;
if (ud >= max)
ud = max;
for (k = 0; k < ud; k++) {
col[k*rows+k-i] = sig[(k-i)*cols+k];
}
} else {
if (i - cols + rows > 0) {
ud--;
if (ud >= max)
ud = max;
}
for (k = 0; k < ud; k++) {
col[(k+i)*rows+k] = sig[k*cols+k+i];
}
}
}
}
void stranspose(float *sig, int rows, int cols,float *col) {
int t,u;
register int i,j;
// #pragma omp parallel for private(i,j,t,u)
for (i=0; i < rows; i++) {
t = i * cols;
u = 0;
for (j=0; j < cols; j++) {
col[u+i] = sig[j+t];
u+=rows;
}
}
}
void rtranspose(float *m, int rows, int cols,float *n, int r, int c) {
register int i,j;
int rm,cm;
int rm1,cm1,rm2,cm2;
int block;
block = (int) BLOCKSIZE;
if (rows <= block && cols <= block) {
for (i = 0; i < rows; ++i) {
for (j = 0; j < cols; ++j) {
n[i+j*r] = m[j+i*c];
}
}
} else if (cols >= rows) {
rm = rows;
cm1 = (int) ceil((float) cols/2.0);
cm2 = cols - cm1;
rtranspose(m,rm,cm1,n,r,c);
rtranspose(m+cm1,rm,cm2,n+cm1*r,r,c);
} else if (rows > cols) {
rm1 = (int) ceil((float) rows/2.0);
rm2 = rows - rm1;
cm = cols;
rtranspose(m,rm1,cm,n,r,c);
rtranspose(m+rm1*c,rm2,cm,n+rm1,r,c);
}
}
void ctranspose(float *sig, int rows, int cols,float *col) {
int r,c;
int block;
block = (int) TBLOCK;
r= rows;
c = cols;
if (rows >= block || cols >= block) {
rtranspose(sig,rows,cols,col,r,c);
} else {
stranspose(sig,rows,cols,col);
}
}
void mtranspose(float *sig, int rows, int cols,float *col) {
int block;
block = (int) TBLOCK;
if (rows >= block && cols >= block) {
ctranspose(sig,rows,cols,col);
} else {
stranspose(sig,rows,cols,col);
}
}
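/* Usage sketch for the transpose routines above (illustrative only; assumes
 matrix.h supplies the BLOCKSIZE and TBLOCK tuning constants): */
static void transpose_demo(void) {
float A[6] = { 1, 2, 3, 4, 5, 6 }; /* 2 x 3, row-major */
float AT[6];
mtranspose(A, 2, 3, AT); /* AT becomes the 3 x 2 matrix {1,4, 2,5, 3,6} */
}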
void itranspose(float *A, int M, int N) {
int i, j, p, iter;
float *buf;
float temp;
if (M == N) {
for (i = 0; i < N; ++i) {
for (j = i + 1; j < N; ++j) {
temp = A[i + j*N];
A[i + j*N] = A[j + i*N];
A[j + i*N] = temp;
}
}
} else if (M > N) {
p = M - N;
buf = (float*)malloc(sizeof(float)* p * N);
memcpy(buf, A + N * N, sizeof(*A)*p*N);
for (i = 0; i < N; ++i) {
for (j = i + 1; j < N; ++j) {
temp = A[i + j*N];
A[i + j*N] = A[j + i*N];
A[j + i*N] = temp;
}
}
for (i = N - 1; i >= 1; --i) {
memmove(A + i*M, A + i*N, sizeof(*A)*M);
}
for (i = 0; i < N; ++i) {
iter = N + i * M;
for (j = 0; j < p; ++j) {
A[iter + j] = buf[j*N + i];
}
}
free(buf);
}
else if (M < N) {
p = N - M;
buf = (float*)malloc(sizeof(float)* p * M);
for (i = 0; i < M; ++i) {
iter = M + i*N;
for (j = 0; j < p; ++j) {
buf[j*M + i] = A[iter + j];
}
}
for (i = 1; i < M; ++i) {
memmove(A + i*M, A + i * N, sizeof(*A)*M);
}
for (i = 0; i < M; ++i) {
for (j = i + 1; j < M; ++j) {
temp = A[i + j*M];
A[i + j*M] = A[j + i*M];
A[j + i*M] = temp;
}
}
memcpy(A + M*M, buf, sizeof(*A)*p*M);
free(buf);
}
}
void mdisplay(float *A, int row, int col) {
int i,j;
printf("\n MATRIX Order : %d X %d \n \n",row,col);
for (i = 0; i < row; i++) {
printf("R%d: ",i);
for ( j = 0; j < col;j++) {
printf("%f ",A[i*col + j]);
}
printf(":R%d \n",i);
}
}
void madd(float* A, float* B, float* C,int rows,int cols) {
int N,i;
/*
* C = A + B . All matrices have identical dimensions rows X cols
*/
N = rows * cols;
#pragma omp parallel for
for (i = 0; i < N; ++i) {
C[i] = A[i] + B[i];
}
}
void msub(float* A, float* B, float* C,int rows,int cols) {
int N,i;
/*
* C = A - B . All matrices have identical dimensions rows X cols
*/
N = rows * cols;
#pragma omp parallel for
for (i = 0; i < N; ++i) {
C[i] = A[i] - B[i];
}
}
void scale(float *A, int rows, int cols, float alpha) {
int N,i;
/*
* A = alpha * A
* Matrix A is overwritten.
*/
N = rows * cols;
#pragma omp parallel for
for (i = 0; i < N;++i) {
A[i] = alpha * A[i];
}
}
void nmult(float* A, float* B, float* C,int ra,int ca, int cb) {
register int i,j,k;
int u,v,t,rb;
/*
* C = A * B , where A is a ra*ca matrix and B is a rb*cb matrix,
* with ca = rb.
* Matrix C is a ra*cb matrix.
*/
rb = ca;
#pragma omp parallel for private(i,j,k,v,u,t)
for (i = 0; i < ra; ++i) {
for (j = 0; j < cb; ++j) {
v = i * rb;
u = i *cb;
t = j + u;
C[t] = 0.;
for (k = 0; k < rb;++k) {
C[t] += A[k + v] * B[j + k * cb];
}
}
}
}
void tmult(float* A, float* B, float* C,int ra,int ca, int cb) {
register int i,j,k;
int u,v,t,rb;
float *BT;
BT = (float*) malloc(sizeof(float) * ca * cb);
/*
* C = A * B , where A is a ra*ca matrix and B is a rb*cb matrix,
* with ca = rb.
* Matrix C is a ra*cb matrix.
*/
mtranspose(B,ca,cb,BT);
rb = ca;
#pragma omp parallel for private(i,j,k,v,u,t)
for (i = 0; i < ra; ++i) {
for (j = 0; j < cb; ++j) {
v = i * rb;
u = i *cb;
t = j + u;
C[t] = 0.;
for (k = 0; k < rb;++k) {
C[t] += A[k + v] * BT[k + j * rb];
}
}
}
free(BT);
}
void recmult(float* A, float* B, float* C,int m,int n, int p,int sA,int sB, int sC) {
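/* Note on the stride convention inherited from rmult below: sB is the row
 stride of A, sC is the shared row stride of B and C, and sA is unused. */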
int m2,n2,p2;
register int i,j,k;
int u,v,t;
if (m + n + p <= CUTOFF) {
//#pragma omp parallel for private(i,j,k,v,u,t)
for (i = 0; i < m; ++i) {
for (j = 0; j < p; ++j) {
v = i * sB;
u = i * sC;
t = j + u;
for (k = 0; k < n;++k) {
C[t] += A[k + v] * B[j + k * sC];
}
}
}
} else if (m >= n && m >= p) {
m2 = (int) ceil((float) m / 2.0);
recmult(A,B,C,m2,n,p,sA,sB,sC);
recmult(A + m2*sB,B,C + m2*sC,m-m2,n,p,sA,sB,sC);
} else if (n >= m && n >= p) {
n2 = (int) ceil((float) n / 2.0);
recmult(A,B,C,m,n2,p,sA,sB,sC);
recmult(A+n2,B+n2*sC,C,m,n-n2,p,sA,sB,sC);
} else if (p >= m && p >= n) {
p2 = (int) ceil((float) p / 2.0);
recmult(A,B,C,m,n,p2,sA,sB,sC);
recmult(A,B+p2,C+p2,m,n,p-p2,sA,sB,sC);
}
}
void rmult(float* A, float* B, float* C,int m,int n, int p) {
int strA,strB,strC;
int N;
register int i;
strA = m;
strB = n;
strC = p;
N = m * p;
for(i = 0; i < N; ++i) {
C[i] = 0.;
}
recmult(A,B,C,m,n,p,strA,strB,strC);
}
int findrec(int *a, int *b, int *c) {
int rec;
float da,db,dc,mul;
da = (float) *a;
db = (float) *b;
dc = (float) *c;
rec = 0;
mul = 1.;
while (da + db + dc > (float) CUTOFF) {
rec++;
mul *= 2;
da = ceilf(da/2.0f);
db = ceilf(db/2.0f);
dc = ceilf(dc/2.0f);
}
*a = (int) da * mul;
*b = (int) db * mul;
*c = (int) dc * mul;
return rec;
}
void add_zero_pad(float *X, int rows, int cols, int zrow, int zcol,float *Y) {
int r,c,i,j,u,v;
r = rows + zrow;
c = cols + zcol;
for (i = 0; i < rows;++i) {
u = i*c;
v = i * cols;
for (j = 0; j < cols;++j) {
Y[u + j] = X[v + j];
}
for (j = cols; j < c;++j) {
Y[u + j] = 0.;
}
}
for (i = rows; i < r;++i) {
u = i*c;
for(j = 0; j < c;++j) {
Y[u + j] = 0.;
}
}
}
void remove_zero_pad(float *Y, int rows, int cols, int zrow, int zcol,float *Z) {
int r,c,i,j,u,v;
r = rows - zrow;
c = cols - zcol;
for (i = 0; i < r; ++i) {
u = i * c;
v = i * cols;
for (j = 0; j < c; ++j) {
Z[j + u] = Y[j + v];
}
}
}
void madd_stride(float* A, float* B, float* C,int rows,int cols,int sA,int sB,int sC) {
int i,j,u,v,w;
for (i = 0; i < rows; ++i) {
u = i * sC;
v = i * sA;
w = i * sB;
for(j = 0; j < cols;j++) {
C[j + u] = A[j + v] + B[j + w];
}
}
}
void msub_stride(float* A, float* B, float* C,int rows,int cols,int sA,int sB,int sC) {
int i,j,u,v,w;
for (i = 0; i < rows; ++i) {
u = i * sC;
v = i * sA;
w = i * sB;
for(j = 0; j < cols;j++) {
C[j + u] = A[j + v] - B[j + w];
}
}
}
void rmadd_stride(float* A, float* B, float* C,int rows,int cols,int p,int sA,int sB,int sC) {
int i,j,u,v,w;
if (rows + cols + p <= CUTOFF) {
for (i = 0; i < rows; ++i) {
u = i * sC;
v = i * sA;
w = i * sB;
for(j = 0; j < cols;j++) {
C[j + u] = A[j + v] + B[j + w];
}
}
} else {
rows/=2;cols/=2;p/=2;
rmadd_stride(A,B,C,rows,cols,p,sA,sB,sC);
rmadd_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC);
rmadd_stride(A + rows *sA,B + rows *sB,C + rows *sC,rows,cols,p,sA,sB,sC);
rmadd_stride(A + rows *sA + cols,B + rows *sB + cols,C + rows *sC + cols,rows,cols,p,sA,sB,sC);
}
}
void rmsub_stride(float* A, float* B, float* C,int rows,int cols,int p,int sA,int sB,int sC) {
int i,j,u,v,w;
if (rows + cols + p <= CUTOFF) {
for (i = 0; i < rows; ++i) {
u = i * sC;
v = i * sA;
w = i * sB;
for(j = 0; j < cols;j++) {
C[j + u] = A[j + v] - B[j + w];
}
}
} else {
rows/=2;cols/=2;p/=2;
rmsub_stride(A,B,C,rows,cols,p,sA,sB,sC);
rmsub_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC);
rmsub_stride(A + rows *sA,B + rows *sB,C + rows *sC,rows,cols,p,sA,sB,sC);
rmsub_stride(A + rows *sA + cols,B + rows *sB + cols,C + rows *sC + cols,rows,cols,p,sA,sB,sC);
}
}
void srecmult(float* A, float* B, float* C,int m,int n, int p,int sA,int sB, int sC) {
register int i,j,k;
int u,v,t;
float sum;
float *A1,*B1;
float *a11,*a12,*a21,*a22;
float *b11,*b12,*b21,*b22;
float *c11,*c12,*c21,*c22;
float *m1,*m2,*m3,*m4,*m5,*m6,*m7;
int sm1,sm2,sm3,sm4,sm5,sm6,sm7;
int sA1,sB1;
if (m + n + p <= CUTOFF) {
for (i = 0; i < m; ++i) {
for (j = 0; j < p; ++j) {
v = i * sA;
u = i * sC;
t = j + u;
sum = 0.;
for (k = 0; k < n;++k) {
sum += A[k + v] * B[j + k * sB];
}
C[t] = sum;
}
}
} else {
m/=2;n/=2;p/=2;
// A size mXn, C size mXp
a11 = A;
a12 = A + n;
a21 = A + m * sA;
a22 = A + n + m * sA;
//B size nXp
b11 = B;
b12 = B + p;
b21 = B + n * sB;
b22 = B + p + n * sB;
//C size mXp
c11 = C;
c12 = C + p;
c21 = C + m * sC;
c22 = C + p + m * sC;
// m matrices have dimension m X p each. See http://en.wikipedia.org/wiki/Strassen_algorithm
m1 = (float*) malloc(sizeof(float) *m * p);
sm1 = p;
m3 = (float*) malloc(sizeof(float) *m * p);
sm3 = p;
m4 = (float*) malloc(sizeof(float) *m * p);
sm4 = p;
m2 = c21;
sm2 = sC;
m5 = c12;
sm5 = sC;
m6 = c22;
sm6 = sC;
m7 = c11;
sm7 = sC;
//m1
sA1 = n;
sB1 = p;
A1 = (float*) malloc(sizeof(float) * m * n);
B1 = (float*) malloc(sizeof(float) * n * p);
madd_stride(a11,a22,A1,m,n,sA,sA,sA1);
madd_stride(b11,b22,B1,n,p,sB,sB,sB1);
srecmult(A1,B1,m1,m,n,p,sA1,sB1,sm1);
free(A1);
free(B1);
//m2
A1 = (float*) malloc(sizeof(float) * m * n);
madd_stride(a21,a22,A1,m,n,sA,sA,sA1);
srecmult(A1,b11,m2,m,n,p,sA1,sB,sm2);
free(A1);
//m3
B1 = (float*) malloc(sizeof(float) * n * p);
msub_stride(b12,b22,B1,n,p,sB,sB,sB1);
srecmult(a11,B1,m3,m,n,p,sA,sB1,sm3);
free(B1);
//m4
B1 = (float*) malloc(sizeof(float) * n * p);
msub_stride(b21,b11,B1,n,p,sB,sB,sB1);
srecmult(a22,B1,m4,m,n,p,sA,sB1,sm4);
free(B1);
//m5
A1 = (float*) malloc(sizeof(float) * m * n);
madd_stride(a11,a12,A1,m,n,sA,sA,sA1);
srecmult(A1,b22,m5,m,n,p,sA1,sB,sm5);
free(A1);
//m6
A1 = (float*) malloc(sizeof(float) * m * n);
B1 = (float*) malloc(sizeof(float) * n * p);
msub_stride(a21,a11,A1,m,n,sA,sA,sA1);
madd_stride(b11,b12,B1,n,p,sB,sB,sB1);
srecmult(A1,B1,m6,m,n,p,sA1,sB1,sm6);
free(A1);
free(B1);
//m7
A1 = (float*) malloc(sizeof(float) * m * n);
B1 = (float*) malloc(sizeof(float) * n * p);
msub_stride(a12,a22,A1,m,n,sA,sA,sA1);
madd_stride(b21,b22,B1,n,p,sB,sB,sB1);
srecmult(A1,B1,m7,m,n,p,sA1,sB1,sm7);
free(A1);
free(B1);
// c11
A1 = (float*) malloc(sizeof(float) * m * p);
sA1 = p;
madd_stride(m1,m7,m7,m,p,sm1,sm7,sm7);
msub_stride(m4,m5,A1,m,p,sm4,sm5,sA1);
madd_stride(m7,A1,m7,m,p,sm7,sA1,sm7);
free(A1);
// c22
A1 = (float*) malloc(sizeof(float) * m * p);
sA1 = p;
madd_stride(m1,m6,m6,m,p,sm1,sm6,sm6);
msub_stride(m3,m2,A1,m,p,sm3,sm2,sA1);
madd_stride(m6,A1,m6,m,p,sm6,sA1,sm6);
free(A1);
//c12
madd_stride(m3,m5,m5,m,p,sm3,sm5,sm5);
//c21
madd_stride(m4,m2,m2,m,p,sm4,sm2,sm2);
free(m1);
free(m3);
free(m4);
}
}
void smult(float* A, float* B, float* C,int m,int n, int p) {
int a,b,c,nrec;
float *X,*Y,*Z;
a = m;
b = n;
c = p;
nrec = findrec(&a,&b,&c);
X = (float*) malloc(sizeof(float) * a * b);
Y = (float*) malloc(sizeof(float) * b * c);
Z = (float*) malloc(sizeof(float) * a * c);
add_zero_pad(A,m,n,a-m,b-n,X);
add_zero_pad(B,n,p,b-n,c-p,Y);
srecmult(X,Y,Z,a,b,c,b,c,c);
remove_zero_pad(Z,a,c,a-m,c-p,C);
free(X);
free(Y);
free(Z);
}
void mmult(float* A, float* B, float* C,int m,int n, int p) {
if (m+n+p <= CUTOFF/2) {
nmult(A,B,C,m,n,p);
} else {
smult(A,B,C,m,n,p);
}
}
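/* Usage sketch (illustrative; CUTOFF is assumed to come from the project
* header, e.g. matrix.h):
* float A[6] = {1,2,3, 4,5,6}; // 2 x 3
* float B[6] = {7,8, 9,10, 11,12}; // 3 x 2
* float C[4]; // 2 x 2 result
* mmult(A, B, C, 2, 3, 2); // C = A * B
* Small products (m+n+p <= CUTOFF/2) take the naive O(mnp) path in nmult;
* larger ones are zero-padded and routed through the Strassen recursion.
*/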
static int pludecomp(float *A,int N,int *ipiv) {
int k,j,l,c1,c2,mind,tempi;
float ld,mult,mval,temp;
for(k=0;k < N;++k)
ipiv[k] = k;
for(k = 0; k < N-1; ++k) {
//c2 = k*N;
mval = fabsf(A[k*N + k]);
mind = k;
for (j=k+1; j < N;++j) {
if (mval < fabsf(A[j*N + k])) {
mval = fabsf(A[j*N + k]);
mind = j;
}
}
if ( mind != k) {
c1 = k *N;
c2 = mind * N;
tempi = ipiv[mind];
ipiv[mind] = ipiv[k];
ipiv[k] = tempi;
for (j = 0; j < N;j++) {
temp = A[c1 + j];
*(A + c1 + j) = *(A + c2 + j);
*(A + c2 + j) = temp;
}
}
c2 = k*N;
ld = A[c2 + k];
if (ld != 0.) {
for (j = k+1; j < N; ++j) {
c1 = j*N;
mult = A[c1+k] /= ld;
//printf("\n k %d j %d mult %f \n",k,j,mult);
for(l = k+1; l < N; ++l) {
A[c1+l] -= mult * A[c2 + l];
}
}
}
}
return 0;
}
void ludecomp(float *A,int N,int *ipiv) {
pludecomp(A,N,ipiv);
}
void linsolve(float *A,int N,float *b,int *ipiv,float *x) {
int i,j,c1,l;
float *y;
float sum;
y = (float*) malloc(sizeof(float) *N);
/*
* Two-step solution of L * U * x = b:
* Let U * x = y.
* Solve L * y = b for y (forward substitution).
* Solve U * x = y for x (back substitution).
*/
for(i = 0; i < N;++i) {
y[i] = 0.;
x[i] = 0.;
if ( A[i*N + i] == 0.) {
printf("The Matrix system does not have a unique solution");
exit(1);
}
//printf("\n B %d",ipiv[i]);
}
// Forward Substitution
y[0] = b[ipiv[0]];
for(i = 1; i < N; ++i) {
sum = 0.;
c1 = i*N;
for(j = 0; j < i; ++j) {
sum += y[j] * A[c1 + j];
}
y[i] = b[ipiv[i]] - sum;
}
// Back Substitution
x[N - 1] = y[N - 1]/A[N * N - 1];
for (i = N - 2; i >= 0; i--) {
sum = 0.;
c1 = i*(N+1);
l=0;
for(j = i+1; j < N;j++) {
l++;
sum += A[c1 + l] * x[j];
}
x[i] = (y[i] - sum) / A[c1];
}
free(y);
}
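/* Usage sketch (illustrative): solve A*x = b for a 3x3 system.
* float A[9] = {2,1,1, 4,3,3, 8,7,9};
* float b[3] = {1, 2, 3};
* float x[3];
* int ipiv[3];
* ludecomp(A, 3, ipiv); // A is overwritten with its packed L\U factors
* linsolve(A, 3, b, ipiv, x); // applies the pivot order, then both substitutions
* A is destroyed in place; copy it first if the original is still needed.
*/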
void minverse(float *A,int N,int *ipiv,float *inv) {
int i,j,stride;
float *col,*x;
col = (float*) malloc(sizeof(float) * N);
x = (float*) malloc(sizeof(float) * N);
for (i = 0; i < N; ++i) {
col[i] = 0.;
x[i] = 0.;
}
for (i = 0; i < N; ++i) {
col[i] = 1.;
linsolve(A,N,col,ipiv,x);
stride = i;
for(j = 0; j < N;++j) {
inv[stride] = x[j];
stride+= N;
}
col[i] = 0.;
}
free(x);
free(col);
}
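/* Usage sketch (illustrative): invert a matrix by repeated column solves.
* float A[4] = {4,7, 2,6};
* float inv[4];
* int ipiv[2];
* ludecomp(A, 2, ipiv); // factor once
* minverse(A, 2, ipiv, inv); // N triangular solves, one per unit vector
* Reusing one LU factorization for all N columns costs O(N^3) + N*O(N^2),
* instead of N separate O(N^3) factorizations.
*/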
void eye(float *mat,int N) {
int i,j,t;
for(i = 0;i < N;++i) {
for(j =0; j < N;++j) {
t = i*N;
if (i == j) {
mat[t+j] = 1.;
} else {
mat[t+j] = 0.;
}
}
}
}
static float house_1(float*x,int N,float *v) {
float beta,mu,temp;
float *sigma;
int i;
sigma = (float*) malloc(sizeof(float) * 1);
if (N > 1) {
mmult(x+1,x+1,sigma,1,N-1,1);
} else {
sigma[0] = 0.0;
}
v[0] =1.;
for (i = 1; i < N;++i) {
v[i] = x[i];
}
if (sigma[0] == 0. && x[0] >= 0.) {
beta = 0.;
} else if (sigma[0] == 0. && x[0] < 0.) {
beta = -2.;
}else {
mu = sqrtf(sigma[0] + x[0] * x[0]);
if (x[0] <= 0.) {
v[0] = x[0] - mu;
} else {
v[0] = - sigma[0] / (x[0] + mu);
}
temp = v[0];
beta = (2.0f * v[0] * v[0]) /(sigma[0] + v[0] * v[0]);
for (i = 0; i < N;++i) {
v[i] /= temp;
}
}
free(sigma);
return beta;
}
float house_2(float*x,int N,float *v) {
float sgn,beta,sc;
float *sigma,*e;
int i;
sigma = (float*) malloc(sizeof(float) * 1);
e = (float*) malloc(sizeof(float) * N);
beta = 2.0;
sgn = 1.0;
mmult(x,x,sigma,1,N,1);
sigma[0] = sqrtf(sigma[0]);
e[0] =1.;
for (i = 1; i < N;++i) {
e[i] = 0.;
}
if (x[0] > 0.) {
sgn = 1.0;
} else if (x[0] < 0.) {
sgn = -1.0;
} else if (x[0] == 0.) {
sgn = 0.;
}
sc = sigma[0] * sgn;
//scale(e,N,1,sc);
e[0] *= sc;
for(i = 0; i < N;++i) {
v[i] = e[i] + x[i];
}
mmult(v,v,sigma,1,N,1);
sigma[0] = sqrtf(sigma[0]);
for(i = 0; i < N;++i) {
v[i] = v[i] / sigma[0];
}
free(sigma);
free(e);
return beta;
}
float house(float*x,int N,float *v) {
float beta;
beta = house_1(x,N,v);
return beta;
}
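/* Sketch of the contract (assuming the Golub & van Loan conventions the
* code references elsewhere): house() returns beta and fills v (with
* v[0] = 1 after normalization) such that the reflector
* P = I - beta * v * v^T
* maps x onto a multiple of the first unit vector, P*x = (+/-)||x|| * e1.
* house_1 appears to follow Algorithm 5.1.1 of Golub & van Loan; house_2
* is the sign-flipping variant with a unit-norm v and beta fixed at 2.
*/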
void housemat(float *v, int N,float beta,float *mat) {
float *temp;
temp = (float*) malloc(sizeof(float) * N * N);
eye(mat,N);
mmult(v,v,temp,N,1,N);
scale(temp,N,N,beta);
msub(mat,temp,mat,N,N);
free(temp);
}
void qrdecomp(float *A, int M, int N,float *bvec) {
int j,i,k,u,t;
float *x,*v,*AT,*w;
float beta;
if (M < N) {
printf("M should be greater than or equal to N");
exit(1);
}
x = (float*) malloc(sizeof(float) * M);
v = (float*) malloc(sizeof(float) * M);
AT = (float*) malloc(sizeof(float) * M * N);
w = (float*) malloc(sizeof(float) * M * M);
for(j = 0; j < N;++j) {
for(i=j;i < M;++i) {
x[i-j] = A[i*N+j];
}
beta = house(x,M-j,v);
bvec[j] = beta;
for (i=j; i < M; i++) {
t = i * N;
u = 0;
for (k=j; k < N; k++) {
AT[u+i-j] = A[k+t];
u+=(M-j);
}
}
mmult(AT,v,w,N-j,M-j,1);
scale(w,N-j,1,beta);
mmult(v,w,AT,M-j,1,N-j);
for (i=j; i < M; i++) {
t = i *N;
for (k=j; k < N; k++) {
A[t+k] -= AT[(i-j)*(N-j) + k - j];
}
}
if (j < M) {
for(i=j+1;i < M;++i) {
A[i*N+j] = v[i-j];
}
}
}
free(x);
free(v);
free(AT);
free(w);
}
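/* Usage sketch (illustrative): compact QR of a 3x2 matrix.
* float A[6] = {1,2, 3,4, 5,6};
* float bvec[2], Q[6], R[4];
* qrdecomp(A, 3, 2, bvec); // R in the upper triangle, v's stored below it
* getQR(A, 3, 2, bvec, Q, R); // expands the stored reflectors into Q (M x N)
* qrdecomp overwrites A with the factorization in compact form: R occupies
* the upper triangle, and the essential part of each Householder vector is
* kept below the diagonal with its beta in bvec.
*/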
void getQR(float *A,int M,int N,float *bvec,float *Q, float *R) {
int i,j,k,t,u;
float *x,*v,*AT,*w;
x = (float*) malloc(sizeof(float) * M);
v = (float*) malloc(sizeof(float) * M);
AT = (float*) malloc(sizeof(float) * M * N);
w = (float*) malloc(sizeof(float) * M * M);
for(i = 0; i < N;++i) {
t = i *N;
for(j = 0; j < N;++j) {
if (i > j) {
R[t+j] = 0.;
} else {
R[t+j] = A[t+j];
}
}
}
for(i = 0; i < M;++i) {
t = i *N;
for(j = 0; j < N;++j) {
if (i == j) {
Q[t+j] = 1.;
} else {
Q[t+j] = 0.;
}
}
}
for(j = N-1; j >= 0;--j) {
v[0] = 1.;
for(i=j+1;i < M;++i) {
v[i-j] = A[i*N+j];
}
for (i=j; i < M; i++) {
t = i * N;
u = 0;
for (k=j; k < N; k++) {
AT[u+i-j] = Q[k+t];
u+=(M-j);
}
}
mmult(AT,v,w,N-j,M-j,1);
scale(w,N-j,1,bvec[j]);
mmult(v,w,AT,M-j,1,N-j);
for (i=j; i < M; i++) {
t = i *N;
for (k=j; k < N; k++) {
Q[t+k] -= AT[(i-j)*(N-j) + k - j];
}
}
}
free(x);
free(v);
free(AT);
free(w);
}
void hessenberg(float *A,int N) {
int k,i,j,t,u;
float *x,*v,*AT,*w;
float beta;
x = (float*) malloc(sizeof(float) * N);
v = (float*) malloc(sizeof(float) * N);
AT = (float*) malloc(sizeof(float) * N * N);
w = (float*) malloc(sizeof(float) * N);
for (k = 0; k < N-2;++k) {
for(i=k + 1;i < N;++i) {
x[i-k-1] = A[i*N+k];
//printf("x %f \n",x[i-k-1]);
}
beta = house(x,N-k-1,v);
for (i=k+1; i < N; i++) {
t = i * N;
u = 0;
for (j=k; j < N; j++) {
AT[u+i-k-1] = A[j+t];
u+=(N-k-1);
}
}
//mdisplay(AT,N-k,N-k-1);
mmult(AT,v,w,N-k,N-k-1,1);
scale(w,N-k,1,beta);
mmult(v,w,AT,N-k-1,1,N-k);
//mdisplay(AT,N-k-1,N-k);
for (i=k+1; i < N; i++) {
t = i * N;
for (j=k; j < N; j++) {
A[t+j] -= AT[(i-k-1)*(N-k) + j - k];
}
}
//mdisplay(A,N,N);
for (i=0; i < N; i++) {
t = i * N;
u = i * (N-k-1);
for (j=k+1; j < N; j++) {
AT[u+j-k-1] = A[t+j];
}
}
//mdisplay(AT,N,N-k-1);
mmult(AT,v,w,N,N-k-1,1);
scale(w,N,1,beta);
mmult(w,v,AT,N,1,N-k-1);
//mdisplay(AT,N,N-k-1);
for (i=0; i < N; i++) {
t = i * N;
u = i * (N-k-1);
for (j=k+1; j < N; j++) {
A[t+j] -= AT[u+j-k-1];
}
}
}
free(x);
free(v);
free(AT);
free(w);
}
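/* Usage sketch (illustrative): in-place reduction to upper Hessenberg form,
* the standard preprocessing step before the QR eigenvalue iteration.
* float A[9] = {4,1,2, 3,4,1, 2,3,4};
* hessenberg(A, 3); // zeros every A(i,j) with i > j+1
* Householder similarity transforms are applied from both sides, so the
* eigenvalues of A are preserved.
*/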
void francisQR(float *A,int N) {
int m,n,k,q,r,t,u,i,j;
float s,t2,beta;
float *x,*v,*AT,*w;
int NN;
/*
* Reference - Algorithm 7.5.1 Golub,van Loan Matrix Computations 3rd Edition
*/
x = (float*) malloc(sizeof(float) * 3);
v = (float*) malloc(sizeof(float) * 3);
AT = (float*) malloc(sizeof(float) * 3 * N);
w = (float*) malloc(sizeof(float) * N);
n = N-1;
m = n-1;
NN = N*N;
s = A[NN-1] + A[NN-N-2];
t2 = A[NN-1] * A[NN-N-2] - A[NN-2] * A[NN-N-1];
x[0] = A[0]*A[0] + A[1]*A[N] - s*A[0] + t2;
x[1] = A[N]*(A[0] + A[N+1] - s);
x[2] = A[N] * A[N+N+1];
if (N <= 2) {
free(x);
free(v);
free(AT);
free(w);
return;
}
for (k = -1; k < N - 3;++k) {
beta = house(x,3,v);
//mdisplay(x,3,1);
if (k > 0) {
q = k;
} else {
q = 0;
}
//printf("q %d \n",q);
for (i=k+1; i < k+4; i++) {
t = i * N;
u = 0;
for (j=q; j < N; j++) {
AT[u+i-k-1] = A[j+t];
u+=3;
}
}
mmult(AT,v,w,N-q,3,1);
scale(w,N-q,1,beta);
mmult(v,w,AT,3,1,N-q);
for (i=k+1; i < k+4; i++) {
t = i * N;
for (j=q; j < N; j++) {
A[t+j] -= AT[(i-k-1)*(N-q) + j - q];
}
}
//mdisplay(A,N,N);
if (k+4 >= n) {
r = N;
} else {
r = k+4+1;
}
//printf("r %d \n",r);
for (i=0; i < r; i++) {
t = i * N;
u = i * 3;
for (j=k+1; j < k+4; j++) {
AT[u+j-k-1] = A[t+j];
}
}
mmult(AT,v,w,r,3,1);
scale(w,r,1,beta);
mmult(w,v,AT,r,1,3);
//mdisplay(AT,N,N-k-1);
for (i=0; i < r; i++) {
t = i * N;
u = i * 3;
for (j=k+1; j < k+4; j++) {
A[t+j] -= AT[u+j-k-1];
}
}
//mdisplay(A,N,N);
x[0] = A[N*(k+2) + k+1];
x[1] = A[N*(k+3) + k+1];
if (k < n-3) {
x[2] = A[N*(k+4) + k+1];
}
//mdisplay(x,3,1);
}
//mdisplay(x,2,1);
beta = house(x,2,v);
for (i=n-1; i < N; i++) {
t = i * N;
u = 0;
for (j=n-2; j < N; j++) {
AT[u+i-n+1] = A[j+t];
u+=2;
}
}
mmult(AT,v,w,3,2,1);
scale(w,3,1,beta);
mmult(v,w,AT,2,1,3);
for (i=n-1; i < N; i++) {
t = i * N;
for (j=n-2; j < N; j++) {
A[t+j] -= AT[(i-n+1)*3 + j - n + 2];
}
}
for (i=0; i < N; i++) {
t = i * N;
u = i * 2;
for (j=n-1; j < N; j++) {
AT[u+j-n+1] = A[t+j];
}
}
mmult(AT,v,w,N,2,1);
scale(w,N,1,beta);
mmult(w,v,AT,N,1,2);
//mdisplay(AT,N,N-k-1);
for (i=0; i < N; i++) {
t = i * N;
u = i * 2;
for (j=n-1; j < N; j++) {
A[t+j] -= AT[u+j-n+1];
}
}
free(x);
free(v);
free(AT);
free(w);
}
void eig22(float *A, int stride,float *eigre,float *eigim) {
int N;
float a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,t2,at11,at12,at21,at22;
N = stride;
a11 = A[0];
a12 = A[1];
a21 = A[N];
a22 = A[N+1];
if ( (a12 + a21) == 0) {
c = 1.0f/sqrtf(2.0f);
s = c;
} else {
t1 = (a11 - a22) / (a12 + a21);
t = t1 /(1.0f + sqrtf(1+t1*t1));
c = 1.0f/sqrtf(1 + t*t);
s = c*t;
}
c2 = c*c;
s2 = s*s;
cs = c*s;
at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21);
at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22);
at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22);
at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21);
eigre[0] = eigre[1] = at11;
eigim[0] = sqrtf(-at12 * at21);
eigim[1] = -sqrtf(-at12 * at21);
if ( at12*at21 >= 0) {
if (at12 == 0) {
c = 0;
s = 1;
c2 = 0;
s2 = 1;
cs = 0;
} else {
t = sqrtf(at21/at12);
t2 = t * t;
cs = t/(1+t2);
c2 = 1.0f/(1+t2);
s2 = t2 /(1+t2);
}
eigim[0] = eigim[1] = 0.0;
eigre[0] = at11 - cs * (at12 + at21);
eigre[1] = at11 + cs * (at12 + at21);
}
}
int francis_iter(float *A, int N, float *H) {
int success,brkpoint;
int i,j,it,p,q,t,u;
float *temp;
success = 0;
brkpoint = 30 * N;
it = 0;
p = N - 1;
temp = (float*) malloc(sizeof(float) * N * N);
for(i = 0; i < N*N;++i) {
H[i] = A[i];
}
hessenberg(H,N);
while (p > 1 && it < brkpoint) {
while (p > 1 && (H[N*p + p-1] == 0 || H[N*(p-1) + p-2] == 0)) {
if (H[N*p + p-1] == 0) {
p--;
} else if (H[N*(p-1) + p-2] == 0) {
p=p-2;
}
}
if (p > 0) {
q = p-1;
while (q > 0 && fabs(H[N*q + q-1]) != 0) {
q--;
}
//printf("%d %d \n",q,p);
for (i=q; i <= p; i++) {
t = i * N;
u = (i-q) * (p-q+1);
for (j=q; j <= p; j++) {
temp[u+j-q] = H[t+j];
}
}
francisQR(temp,p-q+1);
for (i=q; i <= p; i++) {
t = i * N;
u = (i-q) * (p-q+1);
for (j=q; j <= p; j++) {
H[t+j] = temp[u+j-q];
}
}
//mdisplay(H,N,N);
for(i = q; i <= p-1;++i) {
if ( fabs(H[(i+1)*N+i]) <= TOL * (fabs(H[i*N+i]) + fabs(H[(i+1)*N+i+1]) ) ) {
H[(i+1)*N+i] = 0.;
}
}
it++;
//printf("iter %d \n",it);
}
}
if (it == brkpoint) {
success = 0;
} else {
success = 1;
}
free(temp);
return success;
}
static void eig2t(float *A, int stride) {
int N;
float a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,at11,at12,at21,at22;
N = stride;
a11 = A[0];
a12 = A[1];
a21 = A[N];
a22 = A[N+1];
if ( (a12 + a21) == 0) {
c = 1.0f/sqrtf(2.0f);
s = c;
} else {
t1 = (a11 - a22) / (a12 + a21);
t = t1 /(1.0f + sqrtf(1+t1*t1));
c = 1.0f/sqrtf(1 + t*t);
s = c*t;
}
c2 = c*c;
s2 = s*s;
cs = c*s;
at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21);
at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22);
at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22);
at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21);
A[0] = at11;
A[1] = at12;
A[N] = at21;
A[N+1] = at22;
}
void eig(float *A,int N,float *eigre,float *eigim) {
int i,t,u,n;
float *H;
float t1,t2,cs;
H = (float*) malloc(sizeof(float) * N * N);
n = N - 1;
francis_iter(A,N,H);
//mdisplay(H,N,N);
i = 0;
while (i < n) {
u = i * N;
t = (i+1)*N;
if (H[t+i] != 0.) {
eig2t(H+u+i,N);
i = i +2;
} else {
i++;
}
}
//mdisplay(H,N,N);
i = 0;
while (i < n) {
u = i * N;
t = (i+1)*N;
if (H[t+i] != 0.) {
if (H[u+i+1] * H[t+i] < 0.) {
eigre[i] = H[u+i];
eigre[i+1] = H[t+i+1];
eigim[i] = sqrtf(-H[u+i+1] * H[t+i]);
eigim[i+1] = -sqrtf(-H[u+i+1] * H[t+i]);
} else {
if (H[u+i+1] == 0.) {
cs = 0.;
} else {
t1 = sqrtf(H[t+i]/H[u+i+1]);
t2 = t1 * t1;
cs = t1/(1+t2);
}
eigre[i] = H[u+i] - cs * (H[u+i+1] + H[t+i]);
eigre[i+1] = H[u+i] + cs * (H[u+i+1] + H[t+i]);
eigim[i] = 0.;
eigim[i+1] = 0.;
}
i= i + 2;
} else {
eigre[i] = H[u+i];
eigim[i] = 0.;
i++;
}
}
if (i == n) {
eigre[i] = H[N*N - 1];
eigim[i] = 0.;
}
free(H);
}
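/* Usage sketch (illustrative): eigenvalues of a general real matrix.
* float A[4] = {0,-1, 1,0}; // 90-degree rotation
* float re[2], im[2];
* eig(A, 2, re, im); // expect re = {0, 0}, im = {+1, -1}
* eig runs francis_iter to reach the real Schur form, then reads real
* eigenvalues off 1x1 blocks and complex-conjugate pairs off 2x2 blocks.
*/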
static int rcholu(float *A,int N, int stride, float *U22) {
int sc;
int j,i,u,w;
float u11;
if (N == 1) {
if (A[0] > 0) {
A[0] = sqrtf(A[0]);
return 0;
} else {
return -1;
}
} else {
if (A[0] < 0) {
return -1;
}
u11 = sqrtf(A[0]);
A[0] = u11;
for (j = 1; j < N;++j) {
A[j] /= u11;
}
mmult(A+1,A+1,U22,N-1,1,N-1);
for (i = 0; i < N-1; ++i) {
u = stride + 1+ i * stride;
w = i * (N-1);
for(j = i; j < N-1;j++) {
A[j + u] -= U22[j + w];
}
}
sc = rcholu(A+stride+1,N-1,stride,U22);
if (sc == -1) {
return -1;
}
}
return sc;
}
static int rbcholu(float *A,int N, int stride, float *UB, float *UT) {
int bs,bb,i,j,Nb,t,k,u,v,w,sc;
float *b,*x,*U12,*U12T;
float sum;
bs = (int) BLOCKSIZE;
bb = bs*bs;
if (N <= BLOCKSIZE) {
sc = rcholu(A,N,stride,UB);
if (sc == -1) {
return -1;
}
} else {
Nb = N - bs;
x = (float*) malloc(sizeof(float) * bs);
b = (float*) malloc(sizeof(float) * bs);
U12T = (float*) malloc(sizeof(float) * Nb * bs);
U12 = (float*) malloc(sizeof(float) * Nb * bs);
rcholu(A,bs,stride,UB); // U11
for (i =0; i < bs;++i) {
t = i *stride;
u = 0;
for(j = 0; j < N;++j) {
UT[u+i] = A[j+t];
u += bs;
}
}
for(k = 0; k < Nb;++k) {
u = k * bs;
for(i = 0; i < bs;++i) {
b[i] = UT[bb+u+i];
x[i] = 0.;
}
for (i = 0; i < bs;++i) {
t = i*bs;
sum = 0;
for (j = 0; j < i;++j) {
sum += UT[t+j] * x[j];
}
x[i] = (b[i] - sum) / UT[t+i];
}
v = bs + k;
for(i = 0; i < bs;++i) {
A[v] = x[i];
U12T[u+i] = x[i];
v += stride;
}
}
mtranspose(U12T,Nb,bs,U12);
mmult(U12T,U12,UT,Nb,bs,Nb);
free(U12T);
free(U12);
free(b);
free(x);
for (i = 0; i < Nb; ++i) {
u = bs * stride + bs + i * stride;
w = i * Nb;
for(j = i; j < Nb;j++) {
A[j + u] -= UT[j + w];
}
}
sc = rbcholu(A + bs * stride + bs,Nb,stride,UB,UT);
if (sc == -1) {
return -1;
}
}
return sc;
}
int cholu(float *A, int N) {
int stride,i,j,t,sc;
float *U22;
U22 = (float*) malloc(sizeof(float) * N * N);
stride = N;
sc = rcholu(A,N,stride,U22);
for(i=0; i < N;++i) {
t = i *N;
for(j=0;j < i;++j) {
A[t+j] = 0.;
}
}
free(U22);
return sc;
}
int bcholu(float *A, int N) {
int stride,i,j,t,b,sc;
float *UB,*UT;
b = (int) BLOCKSIZE;
UT = (float*) malloc(sizeof(float) * N * N);
UB = (float*) malloc(sizeof(float) * b * b);
stride = N;
sc = rbcholu(A,N,stride,UB,UT);
for(i=0; i < N;++i) {
t = i *N;
for(j=0;j < i;++j) {
A[t+j] = 0.;
}
}
free(UB);
free(UT);
return sc;
}
int chol(float *A, int N) {
int sc;
if ( N <= (int) BLOCKSIZE) {
sc = cholu(A,N);
} else {
sc = bcholu(A,N);
}
return sc;
}
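/* Usage sketch (illustrative): upper Cholesky factor of an SPD matrix.
* float A[4] = {4,2, 2,3};
* int ok = chol(A, 2); // 0 on success, -1 if A is not positive definite
* // A now holds U with A_original = U^T * U, zeros below the diagonal
* chol dispatches to the plain recursion for small matrices and to the
* blocked (BLOCKSIZE-panel) variant for larger ones.
*/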
static void rchold(float *A,int N, int stride, float *U22) {
int j,i,u,w;
float d1;
if (N == 1) {
return;
} else {
d1 = A[0];
for (j = 1; j < N;++j) {
A[j] /= d1;
}
mmult(A+1,A+1,U22,N-1,1,N-1);
scale(U22,N-1,N-1,d1);
for (i = 0; i < N-1; ++i) {
u = stride + 1+ i * stride;
w = i * (N-1);
for(j = i; j < N-1;j++) {
A[j + u] -= U22[j + w];
}
}
rchold(A+stride+1,N-1,stride,U22);
}
}
void chold(float *A, int N) {
int stride,i,j,t;
float *U22;
U22 = (float*) malloc(sizeof(float) * N * N);
stride = N;
rchold(A,N,stride,U22);
for(i=0; i < N;++i) {
t = i *N;
for(j=0;j < i;++j) {
A[t+j] = 0.;
}
}
free(U22);
}
void svd_sort(float *U,int M,int N,float *V,float *q) {
/*
* Pavel Sakov's CSA SVD sort routine is used with some minor
* modifications. See The License below
*/
/*
* Copyright (C) 2000-2008 Pavel Sakov and CSIRO
Redistribution and use of material from the package `csa', with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of material must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. The names of the authors may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
*/
int i,j;
float *UT,*VT,*qq;
int *pos;
UT = (float*) malloc(sizeof(float) * N * M);
VT = (float*) malloc(sizeof(float) * N * N);
qq = (float*) malloc(sizeof(float) * N);
pos = (int*) malloc(sizeof(int) * N);
for(i = 0;i < N;++i) {
qq[i] = q[i];
}
for(i = 0;i < M*N;++i) {
UT[i] = U[i];
}
for(i = 0;i < N*N;++i) {
VT[i] = V[i];
}
//mtranspose(U,M,N,UT);
//mtranspose(V,N,N,VT);
sort1d(q,N,pos);
for(i = 0; i < N;++i) {
q[i] = qq[pos[i]];
for (j = 0; j < M;++j) {
U[j*N+i] = UT[j*N+pos[i]];
}
for (j = 0; j < N;++j) {
V[j*N+i] = VT[j*N+pos[i]];
}
}
free(UT);
free(VT);
free(qq);
free(pos);
}
int svd(float *A,int M,int N,float *U,float *V,float *q) {
int i,j,k,l,t,t2,ierr,cancel,iter,l1;
float eps,g,x,s,temp,f,h,c,y,z,scale;
float *e;
/*
THIS SUBROUTINE IS THE MODIFIED C TRANSLATION OF THE
EISPACK FORTRAN TRANSLATION OF THE ALGOL PROCEDURE SVD,
NUM. MATH. 14, 403-420(1970) BY GOLUB AND REINSCH.
HANDBOOK FOR AUTO. COMP., VOL II-LINEAR ALGEBRA, 134-151(1971).
*/
/*
* U = MXN
* V - NXN
* Q - NX1
*/
/*
* The program returns error codes:
*
* Code 0 if the computation is successful.
* Code -1 if M < N. Transpose the matrix so that rows >= columns and try again.
* Code 15 if the maximum number of iterations is reached without convergence. Increase the SVDMAXITER value
* in the matrix.h header file. The default value is 50.
*
*/
if (M < N) {
printf("Rows (M) should be greater than or equal to Columns (N) \n");
printf("Retry by transposing the input matrix");
return -1;
}
e = (float*) malloc(sizeof(float) * N);
ierr = 0;
eps = macheps();
g = scale = x = 0.0;
for(i = 0; i < M*N;++i) {
U[i] = A[i];
}
for(i = 0; i < N;++i) {
l = i+1;
e[i] = scale * g;
g = 0.0;
s = 0.0;
scale = 0.0;
if (i < M) {
for(k = i; k < M;++k) {
scale += fabsf(U[k*N+i]);
}
if (scale != 0.0) {
for(k = i; k < M;++k) {
t = k * N;
U[t+i] /= scale;
temp = U[t+i];
s += temp*temp;
}
f = U[i*N+i];
g = (f < 0) ? sqrtf(s) : -sqrtf(s);
h = f * g - s;
U[i*N+i] = f - g;
if (i < N - 1) {
for(j = l; j < N;++j) {
s = 0.0;
for(k = i; k < M;++k) {
t = k * N;
s += U[t+i]*U[t+j];
}
f = s / h;
for(k = i; k < M;++k) {
t = k * N;
U[t+j] += f * U[t+i];
}
}
}
for(k = i; k < M;++k) {
t = k * N;
U[t+i] *= scale;
}
}
}
q[i] = scale * g;
g = 0.0;
s = 0.0;
scale = 0.0;
if (i < M && i != N - 1) {
t = i *N;
for(k = l; k < M;++k) {
scale += fabsf(U[t+k]);
}
if (scale != 0.0) {
for(k = l; k < N;++k) {
U[t+k] /= scale;
temp = U[t+k];
s = s + temp*temp;
}
f = U[t+l];
g = (f < 0) ? sqrtf(s) : -sqrtf(s);
h = f * g - s;
U[t+l] = f - g;
for(k = l;k < N;++k) {
e[k] = U[t+k] / h;
}
for (j = l; j < M; j++) {
s = 0.0;
t2 = j * N;
for (k = l; k < N; k++) {
s += U[t2+k] * U[t+k];
}
for (k = l; k < N; k++) {
U[t2+k] += s * e[k];
}
}
for (k = l; k < N; k++)
U[t+k] *= scale;
}
}
temp = fabsf(q[i]) + fabsf(e[i]);
if (x < temp) {
x = temp;
}
}
/*
ierr = 0;
eps = macheps();
tol = eps;
g = x = 0.0;
for(i = 0; i < M*N;++i) {
U[i] = A[i];
}
for(i = 0; i < N;++i) {
l = i+1;
e[i] = g;
s = 0.0;
for(k = i; k < M;++k) {
t = k * N;
temp = U[t+i];
s += temp*temp;
}
if (s < tol) {
g = 0.0;
} else {
f = U[i*N+i];
g = (f < 0) ? sqrtf(s) : -sqrtf(s);
h = f * g - s;
U[i*N+i] = f - g;
for(j = l; j < N;++j) {
s = 0.0;
for(k = i; k < M;++k) {
t = k * N;
s += (U[t+i]*U[t+j]);
}
f = s / h;
for(k = i; k < M;++k) {
t = k * N;
U[t+j] += (f * U[t+i]);
}
}
}
q[i] = g;
s = 0.0;
t = i * N;
for(k = l; k < N;++k) {
temp = U[t+k];
s = s + temp*temp;
}
if (s < tol) {
g = 0.0;
} else {
f = U[t+l];
g = (f < 0) ? sqrtf(s) : -sqrtf(s);
h = f * g - s;
U[t+l] = f - g;
for(k = l;k < N;++k) {
e[k] = U[t+k] / h;
}
for (j = l; j < M; j++) {
s = 0.0;
t2 = j * N;
for (k = l; k < N; k++) {
s += U[t2+k] * U[t+k];
}
for (k = l; k < N; k++) {
U[t2+k] += s * e[k];
}
}
}
temp = fabs(q[i]) + fabs(e[i]);
if (x < temp) {
x = temp;
}
}
*/
//Accumulating Right Hand Transformations
for(i = N - 1;i >= 0;--i) {
t = i * N;
if (i < N - 1) {
if (g != 0.0) {
h = U[t+i+1] * g;
for(j = l;j < N;++j) {
V[j*N+i] = U[t+j] / h;
}
for(j = l;j < N;++j) {
s = 0.0;
for(k = l; k < N;++k) {
s += U[t+k] * V[k*N+j];
}
for(k = l; k < N;++k) {
V[k*N+j] += (s * V[k*N+i]);
}
}
}
for(j = l; j < N;++j) {
V[t+j] = V[j*N+i] = 0.0;
}
}
V[t+i] = 1.0;
g = e[i];
l = i;
}
//Accumulating Left Hand Transformations
for(i = N - 1;i >= 0;--i) {
t = i * N;
l = i+1;
g = q[i];
if (i < N - 1) {
for(j = l;j < N;++j) {
U[t+j] = 0.0;
}
}
if (g != 0.0) {
if (i != N - 1) {
//h = U[t+i] * g;
for(j = l;j < N;++j) {
s = 0.0;
for(k = l; k < M;++k) {
s += (U[k*N+i] * U[k*N+j]);
}
f = (s / U[t+i]) / g;
for(k = i; k < M;++k) {
U[k*N+j] += (f * U[k*N+i]);
}
}
}
for(j = i; j < M;++j) {
U[j*N+i] = U[j*N+i] / g;
}
} else {
for(j = i; j < M;++j) {
U[j*N+i] = 0.0;
}
}
U[t+i] += 1.0;
}
// mdisplay(U,M,N);
eps = eps * x;
for(k = N - 1; k >= 0; --k) {
iter = 0;
while(1) {
iter++;
if (iter > SVDMAXITER) {
printf("Convergence Not Achieved \n");
free(e);
return 15;
}
cancel = 1;
for(l = k; l >= 0; --l) {
if (fabs(e[l]) <= eps) {
cancel = 0; // test for convergence
break;
}
if (fabs(q[l-1]) <= eps) {
//Cancel
break;
}
}
if (cancel) {
c = 0.0;
s = 1.0;
l1 = l - 1;
for(i = l; i <= k;++i) {
f = s*e[i];
e[i] *= c;
if (fabs(f) <= eps) {
break;
}
g = q[i];
h = q[i] = hypotf(f,g);
c = g/h;
s = -f/h;
for(j = 0; j < M;++j) {
t = j * N;
y = U[t+l1];
z = U[t+i];
U[t+l1] = y * c + z * s;
U[t+i] = z * c - y * s;
}
}
}
z = q[k];
if (l != k) {
x = q[l];
y = q[k-1];
g = e[k-1];
h = e[k];
f = 0.5f * (((g + z) / h) * ((g - z) / y) + y / h - h / y);
g = hypotf(f,1.0);
if (f < 0.0) {
temp = f - g;
} else {
temp = f+g;
}
f = x - (z / x) * z + (h / x) * (y / temp - h);
//Next QR Transformation
c = s = 1.0;
for(i = l+1; i <= k;++i) {
g = e[i];
y = q[i];
h = s * g;
g = c * g;
e[i-1] = z = hypotf(f,h);
c = f / z;
s = h / z;
f = x * c + g * s;
g = g * c - x * s;
h = y * s;
y *= c;
for(j = 0; j < N;++j) {
t = j * N;
x = V[t+i-1];
z = V[t+i];
V[t+i-1] = x * c + z * s;
V[t+i] = z * c - x * s;
}
q[i-1] = z = hypotf(f,h);
if (z != 0.0) {
c = f / z;
s = h / z;
}
f = c * g + s * y;
x = c * y - s * g;
for(j = 0; j < M;++j) {
t = j * N;
y = U[t+i-1];
z = U[t+i];
U[t+i-1] = y * c + z * s;
U[t+i] = z * c - y * s;
}
}
e[l] = 0.0;
e[k] = f;
q[k] = x;
} else {
//convergence
if (z < 0.0) {
q[k] = -z;
for (j = 0; j < N; j++) {
t = j *N;
V[t+k] = -V[t+k];
}
}
break;
}
}
}
svd_sort(U,M,N,V,q);
free(e);
return ierr;
}
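/* Usage sketch (illustrative): thin SVD A = U * diag(q) * V^T of a tall matrix.
* float A[6] = {1,0, 0,2, 0,0}; // 3 x 2; M >= N is required
* float U[6], V[4], q[2];
* int ret = svd(A, 3, 2, U, V, q); // 0 on success; here q becomes {2, 1}
* svd_sort orders the singular values largest-first, which rank_c and
* lls_svd_multi below rely on. For wide matrices use svd_transpose.
*/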
int svd_transpose(float *A, int M, int N, float *U, float *V, float *q) {
int ret;
/* Call this routine if M < N
* U = MXM
* V - NXM
* Q - MX1
*/
if (M >= N) {
printf("M>=N. Use svd routine.\n");
exit(-1);
}
mtranspose(A, M, N, V);
ret = svd(V, N, M, V, U, q);
return ret;
}
static int rank_c(float *A, int M,int N) {
int i,rnk,ret;
float eps,tol,szmax,qmax;
float *U,*V,*q;
U = (float*) malloc(sizeof(float) * M*N);
V = (float*) malloc(sizeof(float) * N*N);
q = (float*) malloc(sizeof(float) * N);
eps = macheps();
rnk = 0;
if (M < N) {
//mtranspose(A,M,N,U);
szmax = (float) N;
} else {
szmax = (float) M;
}
ret = svd(A,M,N,U,V,q);
qmax = q[0];
if ( ret != 0) {
printf("Failed to Compute SVD");
free(U);
free(V);
free(q);
return -1;
}
tol = qmax*szmax *eps;
for(i = 0; i < N;++i) {
if (q[i] > tol) {
rnk++;
}
}
free(U);
free(V);
free(q);
return rnk;
}
int rank(float *A, int M,int N) {
int rnk;
float *AT;
AT = (float*) malloc(sizeof(float) * M*N);
if (M < N) {
mtranspose(A,M,N,AT);
rnk = rank_c(AT,N,M);
} else {
rnk = rank_c(A,M,N);
}
free(AT);
return rnk;
}
int lls_svd_multi(float *A, float *b, int M,int N, float *x) {
int rnk, ret, i;
float *U, *V, *q, *UT, *d;
float eps, tol, szmax, qmax;
if (M < N) {
printf("Rows (M) should be greater than or equal to Columns (N) \n");
return -1;
}
U = (float*)malloc(sizeof(float)* M*N);
V = (float*)malloc(sizeof(float)* N*N);
q = (float*)malloc(sizeof(float)* N);
UT = (float*)malloc(sizeof(float)* M*N);
d = (float*)malloc(sizeof(float)* N);
/*
The code returns -1 if SVD computation fails else it returns the rank of the matrix A (and the real size of vector x)
*/
ret = svd(A, M, N, U, V, q);
if (ret != 0) {
printf("Failed to Compute SVD");
free(U);
free(V);
free(q);
free(UT);
free(d);
return -1;
}
szmax = (float)M;
eps = macheps();
rnk = 0;
qmax = q[0];
tol = qmax*szmax *eps;
for (i = 0; i < N; ++i) {
if (q[i] > tol) {
rnk++;
}
}
mtranspose(U, M, N, UT);
mmult(UT, b, d, N, M, 1);
for (i = 0; i < rnk; ++i) {
d[i] /= q[i];
}
for (i = rnk; i < N; ++i) {
d[i] = 0.0;
}
mmult(V, d, x, N, N, 1);
free(U);
free(V);
free(q);
free(UT);
free(d);
return(rnk);
}
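/* Usage sketch (illustrative): least-squares fit of an overdetermined system.
* float A[6] = {1,1, 1,2, 1,3}; // 3 x 2 design matrix
* float b[3] = {1, 2, 2};
* float x[2];
* int rnk = lls_svd_multi(A, b, 3, 2, x); // rank on success, -1 on failure
* The minimum-norm solution is x = V * diag(1/q_i) * U^T * b, where singular
* values below tol = q_max * M * macheps() are treated as zero.
*/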
|
perfect_number.c | /* vim: set ft=c sw=4 ts=4: */
/* perfect_number.c
* Perfect number calculation in C */
#include "perfect_number.h"
#include <stdio.h>
#include "dynarr/DG_dynarr.h"
/** perfect number predicate */
bool is_perfect(unsigned n) {
unsigned sum = 0;
for (unsigned i = 1; i < n; i++) {
if (n % i == 0)
sum += i;
}
return (sum == n);
}
/** find perfect numbers < limit */
unsigned perfect_numbers(PerfectNumbers *pn, unsigned limit) {
da_init(*pn);
unsigned i;
#pragma omp parallel for private(i) schedule(dynamic)
for (i = 1; i < limit; i++) {
if (is_perfect(i)) {
/* da_push is not thread-safe; serialize concurrent appends */
#pragma omp critical
da_push(*pn, i);
}
}
return da_count(*pn);
}
/** pretty-print perfect numbers */
void print_perfect_numbers(PerfectNumbers const *const pn) {
unsigned len = da_count(*pn);
printf("{");
if (len > 0)
printf(" %u", pn->p[0]);
for (unsigned i = 1; i < len; i++) {
printf(", %u", pn->p[i]);
}
printf(" }");
}
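/* Usage sketch (illustrative; da_free is assumed to be the DG_dynarr cleanup macro):
* PerfectNumbers pn;
* unsigned count = perfect_numbers(&pn, 10000); // finds 6, 28, 496, 8128
* print_perfect_numbers(&pn);
* da_free(pn);
* With dynamic scheduling the values may be pushed out of order, so sort
* the array first if ordered output matters.
*/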
|
nodal_two_step_v_p_strategy_for_FSI.h | //
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: June 2018 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H
#define KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_for_FSI.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_continuity_for_FSI.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_block_builder_and_solver.h"
#include "custom_utilities/solver_settings.h"
#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include "nodal_two_step_v_p_strategy.h"
#include "nodal_two_step_v_p_strategy_for_FSI.h"
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <fstream>
namespace Kratos
{
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class NodalTwoStepVPStrategyForFSI : public NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalTwoStepVPStrategyForFSI);
/// Counted pointer of NodalTwoStepVPStrategy
//typedef boost::shared_ptr< NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;
typedef NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
/// Node type (default is: Node<3>)
typedef Node<3> NodeType;
/// Geometry type (using with given NodeType)
typedef Geometry<NodeType> GeometryType;
typedef std::size_t SizeType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mVelocityTolerance;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mPressureTolerance;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mMaxPressureIter;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mDomainSize;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mTimeOrder;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mReformDofSet;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpMomentumStrategy;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpPressureStrategy;
typedef GeometryType::ShapeFunctionsGradientsType ShapeFunctionDerivativesArrayType;
typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType;
///@}
///@name Life Cycle
///@{
NodalTwoStepVPStrategyForFSI(ModelPart &rModelPart,
SolverSettingsType &rSolverConfig) : BaseType(rModelPart)
{
NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::InitializeStrategy(rSolverConfig);
}
NodalTwoStepVPStrategyForFSI(ModelPart &rModelPart,
/*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
typename TLinearSolver::Pointer pVelocityLinearSolver,
typename TLinearSolver::Pointer pPressureLinearSolver,
bool ReformDofSet = true,
double VelTol = 0.0001,
double PresTol = 0.0001,
int MaxPressureIterations = 1, // Only for predictor-corrector
unsigned int TimeOrder = 2,
unsigned int DomainSize = 2) : BaseType(rModelPart,
pVelocityLinearSolver,
pPressureLinearSolver,
ReformDofSet,
VelTol,
PresTol,
MaxPressureIterations,
TimeOrder,
DomainSize)
{
KRATOS_TRY;
BaseType::SetEchoLevel(1);
// Check that input parameters are reasonable and sufficient.
this->Check();
bool CalculateNormDxFlag = true;
bool ReformDofAtEachIteration = false; // DofSet modification is managed by the fractional step strategy; auxiliary strategies should not modify the DofSet directly.
// Additional Typedefs
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
//initializing fractional velocity solution step
typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
typename SchemeType::Pointer pScheme;
typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
pScheme.swap(Temp);
//CONSTRUCTION OF VELOCITY
BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverForFSI<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));
this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());
vel_build->SetCalculateReactionsFlag(false);
BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));
this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());
pressure_build->SetCalculateReactionsFlag(false);
KRATOS_CATCH("");
}
/// Destructor.
virtual ~NodalTwoStepVPStrategyForFSI() {}
double Solve() override
{
// Initialize BDF2 coefficients
ModelPart &rModelPart = BaseType::GetModelPart();
this->SetTimeCoefficients(rModelPart.GetProcessInfo());
double NormDp = 0.0;
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
unsigned int maxNonLinearIterations = mMaxPressureIter;
std::cout << "\n Solve with nodally_integrated_two_step_vp strategy at t=" << currentTime << "s" << std::endl;
if (timeIntervalChanged == true && currentTime > 10 * timeInterval)
{
maxNonLinearIterations *= 2;
}
if (currentTime < 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
maxNonLinearIterations *= 3;
}
if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
maxNonLinearIterations *= 2;
}
bool momentumConverged = true;
bool continuityConverged = false;
bool fixedTimeStep = false;
// bool momentumAlreadyConverged=false;
// bool continuityAlreadyConverged=false;
/* boost::timer solve_step_time; */
// std::cout<<" InitializeSolutionStep().... "<<std::endl;
this->UnactiveSliverElements();
InitializeSolutionStep(); // it fills SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids and inner solids
for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
{
if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "----- > iteration: " << it << std::endl;
if (it == 0)
{
ComputeNodalVolumeAndAssignFlagToElementType(); // it assigns NODAL_VOLUME to fluid and SOLID_NODAL_VOLUME to solid. Interface nodes have both
this->InitializeNonLinearIterations(); // it fills SOLID_NODAL_SFD_NEIGHBOURS for solids and NODAL_SFD_NEIGHBOURS for fluids
}
// std::cout<<" CalcNodalStrainsAndStresses .... "<<std::endl;
CalcNodalStrainsAndStresses(); // it computes stresses and strains for fluid and solid nodes
// std::cout<<" CalcNodalStrainsAndStresses DONE "<<std::endl;
momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep);
UpdateTopology(rModelPart, BaseType::GetEchoLevel());
// std::cout<<" ComputeNodalVolume .... "<<std::endl;
ComputeNodalVolume();
// std::cout<<" ComputeNodalVolume DONE "<<std::endl;
this->InitializeNonLinearIterations();
// std::cout<<" InitializeNonLinearIterations DONE "<<std::endl;
CalcNodalStrains();
// std::cout<<" CalcNodalStrains DONE "<<std::endl;
if (fixedTimeStep == false)
{
continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations);
}
// if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){
// std::ofstream myfile;
// myfile.open ("momentumConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
// momentumAlreadyConverged=true;
// }
// if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){
// std::ofstream myfile;
// myfile.open ("continuityConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
// continuityAlreadyConverged=true;
// }
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 1))
{
//this->ComputeErrorL2NormCaseImposedG();
//this->ComputeErrorL2NormCasePoiseuille();
this->CalculateAccelerations();
// std::ofstream myfile;
// myfile.open ("maxConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
}
if ((continuityConverged && momentumConverged) && it > 1)
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
std::cout << "nodal V-P strategy converged in " << it + 1 << " iterations." << std::endl;
break;
}
if (fixedTimeStep == true)
{
break;
}
}
if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "Convergence tolerance not reached." << std::endl;
if (mReformDofSet)
this->Clear();
/* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */
return NormDp;
}
void Initialize() override
{
std::cout << " \n Initialize in nodal_two_step_v_p_strategy_FSI" << std::endl;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size();
unsigned int sizeSDFNeigh = neighbourNodes * dimension;
if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS))
{
Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
if (rNodalStress.size() != sizeStrains)
{
rNodalStress.resize(sizeStrains, false);
}
noalias(rNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS))
{
Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
if (rNodalStress.size() != sizeStrains)
{
rNodalStress.resize(sizeStrains, false);
}
noalias(rNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_VOLUME))
{
itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE))
{
itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA))
{
itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS))
{
Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
if (rNodalSFDneighbours.size() != sizeSDFNeigh)
{
rNodalSFDneighbours.resize(sizeSDFNeigh, false);
}
noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
}
else
{
std::cout << "THIS node does not have NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE))
{
Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
if (rSpatialDefRate.size() != sizeStrains)
{
rSpatialDefRate.resize(sizeStrains, false);
}
noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD))
{
Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
if (rFgrad.size1() != dimension)
{
rFgrad.resize(dimension, dimension, false);
}
noalias(rFgrad) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL))
{
Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
if (rFgradVel.size1() != dimension)
{
rFgradVel.resize(dimension, dimension, false);
}
noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS))
{
Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
if (rSolidNodalStress.size() != sizeStrains)
{
rSolidNodalStress.resize(sizeStrains, false);
}
noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS))
{
Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
if (rSolidNodalStress.size() != sizeStrains)
{
rSolidNodalStress.resize(sizeStrains, false);
}
noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS))
{
Vector &rSolidNodalSFDneighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
if (rSolidNodalSFDneighbours.size() != sizeSDFNeigh)
{
rSolidNodalSFDneighbours.resize(sizeSDFNeigh, false);
}
noalias(rSolidNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE))
{
Vector &rSolidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
if (rSolidSpatialDefRate.size() != sizeStrains)
{
rSolidSpatialDefRate.resize(sizeStrains, false);
}
noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD))
{
Matrix &rSolidFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
if (rSolidFgrad.size1() != dimension)
{
rSolidFgrad.resize(dimension, dimension, false);
}
noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL))
{
Matrix &rSolidFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (rSolidFgradVel.size1() != dimension)
{
rSolidFgradVel.resize(dimension, dimension, false);
}
noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl;
}
AssignMaterialToEachNode(itNode);
}
// }
}
void AssignMaterialToEachNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
double deviatoricCoeff = 0;
double volumetricCoeff = 0;
if (itNode->Is(SOLID))
{
double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
//deviatoricCoeff=deltaT*secondLame
deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
//volumetricCoeff=bulk*deltaT=deltaT*(firstLame+2*secondLame/3)
volumetricCoeff = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio)) + 2.0 * deviatoricCoeff / 3.0;
}
else if (itNode->Is(FLUID) || itNode->Is(RIGID))
{
deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
}
if ((itNode->Is(SOLID) && itNode->Is(RIGID)))
{
itNode->FastGetSolutionStepValue(INTERFACE_NODE) = true;
}
else
{
itNode->FastGetSolutionStepValue(INTERFACE_NODE) = false;
}
double currFirstLame = volumetricCoeff - 2.0 * deviatoricCoeff / 3.0;
//currFirstLame=deltaT*firstLame
itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) = currFirstLame;
itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT) = deviatoricCoeff;
}
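// Derivation sketch for the coefficients above (standard linear-elasticity
// relations; E = YOUNG_MODULUS, nu = POISSON_RATIO, dt = DELTA_TIME):
//   shear (second Lame) modulus:  mu     = E / (2*(1+nu))
//   first Lame parameter:         lambda = E*nu / ((1+nu)*(1-2*nu))
//   bulk modulus:                 K      = lambda + 2*mu/3
// The code stores the time-step-scaled moduli:
//   deviatoricCoeff = dt*mu for solids, or DYNAMIC_VISCOSITY for fluid/rigid nodes
//   volumetricCoeff = dt*K, so VOLUMETRIC_COEFFICIENT = dt*K - 2*dt*mu/3 = dt*lambda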
void UnactiveSliverElements()
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
MesherUtilities MesherUtils;
double ModelPartVolume = MesherUtils.ComputeModelPartVolume(rModelPart);
double CriticalVolume = 0.001 * ModelPartVolume / double(rModelPart.Elements().size());
double ElementalVolume = 0;
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
unsigned int numNodes = itElem->GetGeometry().size();
if (numNodes == (dimension + 1))
{
if (dimension == 2)
{
ElementalVolume = (itElem)->GetGeometry().Area();
}
else if (dimension == 3)
{
ElementalVolume = (itElem)->GetGeometry().Volume();
}
if (ElementalVolume < CriticalVolume)
{
// std::cout << "sliver element: it has Volume: " << ElementalVolume << " vs CriticalVolume(meanVol/1000): " << CriticalVolume<< std::endl;
(itElem)->Set(ACTIVE, false);
}
else
{
(itElem)->Set(ACTIVE, true);
}
}
}
}
KRATOS_CATCH("");
}
void ComputeNodalVolume()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ElementsArrayType &pElements = rModelPart.Elements();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition);
// #pragma omp parallel
// {
int k = OpenMPUtils::ThisThread();
typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k];
typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1];
for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized
{
Element::GeometryType &geometry = itElem->GetGeometry();
double elementalVolume = 0;
if (dimension == 2)
{
elementalVolume = geometry.Area() / 3.0;
}
else if (dimension == 3)
{
elementalVolume = geometry.Volume() * 0.25;
}
// index = 0;
unsigned int numNodes = geometry.size();
for (unsigned int i = 0; i < numNodes; i++)
{
double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME);
nodalVolume += elementalVolume;
if (itElem->Is(SOLID))
{
double &solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
solidVolume += elementalVolume;
nodalVolume += -elementalVolume;
// if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// //I have the subtract the solid volume to the nodal volume of the interface fluid nodes because I added it before
// nodalVolume += -elementalVolume;
// }
}
}
}
// }
}
void ComputeNodalVolumeAndAssignFlagToElementType()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ElementsArrayType &pElements = rModelPart.Elements();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition);
// #pragma omp parallel
// {
int k = OpenMPUtils::ThisThread();
typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k];
typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1];
for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized
{
Element::GeometryType &geometry = itElem->GetGeometry();
double elementalVolume = 0;
if (dimension == 2)
{
elementalVolume = geometry.Area() / 3.0;
}
else if (dimension == 3)
{
elementalVolume = geometry.Volume() * 0.25;
}
// index = 0;
unsigned int numNodes = geometry.size();
unsigned int fluidNodes = 0;
unsigned int solidNodes = 0;
unsigned int interfaceNodes = 0;
for (unsigned int i = 0; i < numNodes; i++)
{
if ((geometry(i)->Is(FLUID) && geometry(i)->IsNot(SOLID)) || (geometry(i)->Is(FLUID) && geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE) == true))
{
fluidNodes += 1;
}
if (geometry(i)->Is(SOLID))
{
solidNodes += 1;
}
if (geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
interfaceNodes += 1;
}
}
if (solidNodes == numNodes)
{
itElem->Set(SOLID);
// std::cout<<"THIS SOLID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
}
if (interfaceNodes == numNodes)
{
itElem->Set(SOLID);
// std::cout<<"THIS INTERFACE ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
}
if (fluidNodes == numNodes)
{
itElem->Set(FLUID);
// std::cout<<"THIS FLUID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
}
if (solidNodes == numNodes && fluidNodes == numNodes)
{
itElem->Reset(FLUID);
// std::cout<<"THIS ELEMENT WAS BOTH FLUID AND SOLID "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
}
for (unsigned int i = 0; i < numNodes; i++)
{
double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME);
nodalVolume += elementalVolume;
if (itElem->Is(SOLID))
{
double &solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
solidVolume += elementalVolume;
nodalVolume += -elementalVolume;
// if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// //I have the subtract the solid volume to the nodal volume of the interface fluid nodes because I added it before
// nodalVolume += -elementalVolume;
// }
// if(interfaceNodes==numNodes && solidDensity==0){
// std::cout<<"This interface element has not a correct density....I am assigning it the fluid density----- TODO: IMPROVE IT, TAKE FROM NEIGHBOURS"<<std::endl;
// double density=geometry(i)->FastGetSolutionStepValue(DENSITY);
// geometry(i)->FastGetSolutionStepValue(SOLID_DENSITY)=density;
// }
}
}
}
// }
}
void InitializeSolutionStep() override
{
FillNodalSFDVector();
}
void FillNodalSFDVector()
{
// std::cout << "FillNodalSFDVector(); ... " << std::endl;
ModelPart &rModelPart = BaseType::GetModelPart();
// #pragma omp parallel
// {
// ModelPart::NodeIterator NodesBegin;
// ModelPart::NodeIterator NodesEnd;
// OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
// for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
// {
for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++)
{
this->InitializeNodalVariablesForRemeshedDomain(itNode);
InitializeNodalVariablesForSolidRemeshedDomain(itNode);
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == false)
{
this->SetNeighboursOrderToNode(itNode); // it assigns neighbours to inner nodes, filling NODAL_SFD_NEIGHBOURS_ORDER
if (itNode->Is(SOLID))
{
SetNeighboursOrderToSolidNode(itNode); // it assigns neighbours to solid inner nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER
}
}
else
{
SetNeighboursOrderToInterfaceNode(itNode); // it assigns neighbours to interface nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids
}
}
// }
// std::cout << "FillNodalSFDVector(); DONE " << std::endl;
}
void SetNeighboursOrderToSolidNode(ModelPart::NodeIterator itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size() + 1; // +1 because the node itself must also be counted as a neighbour node
Vector &rNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
if (rNodeOrderedNeighbours.size() != neighbourNodes)
rNodeOrderedNeighbours.resize(neighbourNodes, false);
noalias(rNodeOrderedNeighbours) = ZeroVector(neighbourNodes);
rNodeOrderedNeighbours[0] = itNode->Id();
if (neighbourNodes > 1)
{
for (unsigned int k = 0; k < neighbourNodes - 1; k++)
{
rNodeOrderedNeighbours[k + 1] = neighb_nodes[k].Id();
}
}
}
void SetNeighboursOrderToInterfaceNode(ModelPart::NodeIterator itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size() + 1;
unsigned int fluidCounter = 1;
unsigned int solidCounter = 1;
if (neighbourNodes > 1)
{
for (unsigned int k = 0; k < neighbourNodes - 1; k++)
{
if (neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
fluidCounter += 1;
}
if (neighb_nodes[k].Is(SOLID))
{
solidCounter += 1;
}
}
}
Vector &rFluidNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
Vector &rSolidNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
if (rFluidNodeOrderedNeighbours.size() != fluidCounter)
rFluidNodeOrderedNeighbours.resize(fluidCounter, false);
if (rSolidNodeOrderedNeighbours.size() != solidCounter)
rSolidNodeOrderedNeighbours.resize(solidCounter, false);
noalias(rFluidNodeOrderedNeighbours) = ZeroVector(fluidCounter);
noalias(rSolidNodeOrderedNeighbours) = ZeroVector(solidCounter);
rFluidNodeOrderedNeighbours[0] = itNode->Id();
rSolidNodeOrderedNeighbours[0] = itNode->Id();
fluidCounter = 0;
solidCounter = 0;
if (neighbourNodes > 1)
{
for (unsigned int k = 0; k < neighbourNodes - 1; k++)
{
if (neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
fluidCounter += 1;
rFluidNodeOrderedNeighbours[fluidCounter] = neighb_nodes[k].Id();
}
if (neighb_nodes[k].Is(SOLID))
{
solidCounter += 1;
rSolidNodeOrderedNeighbours[solidCounter] = neighb_nodes[k].Id();
}
}
}
}
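// Resets all SOLID_* nodal variables of a node of the remeshed solid domain:
// the stress and strain-rate vectors (Voigt size 3*(dimension-1), i.e. 3
// components in 2D and 6 in 3D), the SFD vector (neighbourNodes * dimension
// entries), the deformation-gradient matrices and the scalar quantities are
// resized where needed and zeroed.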
void InitializeNodalVariablesForSolidRemeshedDomain(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size() + 1;
unsigned int sizeSDFNeigh = neighbourNodes * dimension;
if (itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS))
{
Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
if (rSolidNodalStress.size() != sizeStrains)
rSolidNodalStress.resize(sizeStrains, false);
noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS))
{
Vector &rSolidNodalDevStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
if (rSolidNodalDevStress.size() != sizeStrains)
rSolidNodalDevStress.resize(sizeStrains, false);
noalias(rSolidNodalDevStress) = ZeroVector(sizeStrains);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS))
{
Vector &rSolidNodalSFDneighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
if (rSolidNodalSFDneighbours.size() != sizeSDFNeigh)
rSolidNodalSFDneighbours.resize(sizeSDFNeigh, false);
noalias(rSolidNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS_ORDER))
{
Vector &rSolidNodalSFDneighboursOrder = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
if (rSolidNodalSFDneighboursOrder.size() != neighbourNodes)
rSolidNodalSFDneighboursOrder.resize(neighbourNodes, false);
noalias(rSolidNodalSFDneighboursOrder) = ZeroVector(neighbourNodes);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE))
{
Vector &rSolidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
if (rSolidSpatialDefRate.size() != sizeStrains)
rSolidSpatialDefRate.resize(sizeStrains, false);
noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD))
{
Matrix &rSolidFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
if (rSolidFgrad.size1() != dimension)
rSolidFgrad.resize(dimension, dimension, false);
noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL))
{
Matrix &rSolidFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (rSolidFgradVel.size1() != dimension)
rSolidFgradVel.resize(dimension, dimension, false);
noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUMETRIC_DEF_RATE))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = 0;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_EQUIVALENT_STRAIN_RATE))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = 0;
}
}
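// Computes nodal strain rates and Cauchy stresses for the whole model part.
// Interface nodes are processed twice, once with the fluid stencil
// (theta = 0.5, midpoint velocities) and once with the solid stencil
// (theta = 1.0, backward-Euler velocities); pure solid and pure fluid nodes
// use their respective variant only. Nodes whose fluid and solid nodal
// volumes are both zero (typically just created by remeshing) are
// reinitialized instead.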
void CalcNodalStrainsAndStresses()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
double solidNodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
double theta = 0.5;
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
if (nodalVolume > 0)
{
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
Matrix &interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
Matrix &interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
if (interfaceFgrad.size1() != dimension)
interfaceFgrad.resize(dimension, dimension, false);
if (interfaceFgradVel.size1() != dimension)
interfaceFgradVel.resize(dimension, dimension, false);
noalias(interfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(interfaceFgradVel) = ZeroMatrix(dimension, dimension);
//the stresses and strains must be computed twice: once for the fluid side and once for the solid side
//the following function is more expensive than the general one because it performs an extra loop over the neighbour nodes; this is why it is used here also for fluid interface nodes
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel);
CalcNodalStrainsAndStressesForInterfaceFluidNode(itNode);
}
if (solidNodalVolume > 0)
{
Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (solidInterfaceFgrad.size1() != dimension)
solidInterfaceFgrad.resize(dimension, dimension, false);
if (solidInterfaceFgradVel.size1() != dimension)
solidInterfaceFgradVel.resize(dimension, dimension, false);
noalias(solidInterfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(solidInterfaceFgradVel) = ZeroMatrix(dimension, dimension);
theta = 1.0;
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel);
CalcNodalStrainsAndStressesForInterfaceSolidNode(itNode);
}
}
else
{
if (itNode->Is(SOLID) && solidNodalVolume > 0)
{
theta = 1.0;
ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta);
CalcNodalStrainsAndStressesForSolidNode(itNode);
}
else if (nodalVolume > 0)
{
theta = 0.5;
this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
this->CalcNodalStrainsAndStressesForNode(itNode);
}
}
if (nodalVolume == 0 && solidNodalVolume == 0)
{ // both the fluid and the solid nodal volumes are zero (e.g. node just created by remeshing)
theta = 0.5;
this->InitializeNodalVariablesForRemeshedDomain(itNode);
InitializeNodalVariablesForSolidRemeshedDomain(itNode);
}
// }
// if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){
// CopyValuesToSolidNonInterfaceNodes(itNode);
// }
}
// }
/* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}
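// Copies the fluid nodal quantities (SFD neighbour lists, deformation
// gradients, strain rates and stresses) into their SOLID_* counterparts.
// Judging by its (currently commented-out) call sites, it is intended for
// solid nodes that are not interface nodes.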
void CopyValuesToSolidNonInterfaceNodes(ModelPart::NodeIterator itNode)
{
Vector &solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector &solidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
Vector &solidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
double &volumetricDefRate = itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE);
Vector &solidCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
Vector &solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
unsigned int sizeNodalSFDneighboursId = nodalSFDneighboursId.size();
solidNodalSFDneighboursId.resize(sizeNodalSFDneighboursId, false);
Vector nodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
unsigned int sizeNodalSFDneigh = nodalSFDneigh.size();
solidNodalSFDneigh.resize(sizeNodalSFDneigh, false);
solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
solidNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
solidInterfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
solidSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
volumetricDefRate = itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE);
solidCauchyStress = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS);
solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
}
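// Fluid-side strain and stress update for an interface node. From the nodal
// deformation gradient F and its rate Fdot it computes the spatial velocity
// gradient L = Fdot * F^{-1} and its symmetric part d (the spatial
// deformation rate, stored in Voigt form). The total stress is
// sigma = dt*K*tr(d)*I + 2*mu_eff*d, with K the nodal BULK_MODULUS and
// mu_eff the (optionally Papanastasiou-regularised) viscosity; the
// deviatoric stress subtracts the volumetric part tr(d)/3 from the normal
// components.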
void CalcNodalStrainsAndStressesForInterfaceFluidNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
double deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
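// Papanastasiou-regularised Bingham viscosity (standard form):
//   mu_eff = mu + (tau_y / gamma_dot) * (1 - exp(-m * gamma_dot)),
// with tau_y = YIELD_SHEAR, m = ADAPTIVE_EXPONENT and gamma_dot the
// equivalent strain rate; for very small gamma_dot the coefficient is
// replaced by its limit m * tau_y. Note that this first correction uses the
// strain rate from the previous evaluation and that the same term is added
// again below once the strain rate has been recomputed, so the yield
// contribution apparently enters twice; whether this is intentional is not
// obvious from the code.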
if (yieldShear > 0)
{
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
double currFirstLame = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
Matrix Fgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
//spatial velocity gradient tensor: L = Fdot * F^{-1}, i.e. L_ij = Fdot_ik * invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]));
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy;
}
else if (dimension == 3)
{
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]);
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_zz;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[3] = nodalSigmaTot_xy;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[4] = nodalSigmaTot_xz;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[5] = nodalSigmaTot_yz;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_zz;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[3] = nodalSigmaDev_xy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[4] = nodalSigmaDev_xz;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[5] = nodalSigmaDev_yz;
}
}
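// Solid-side strain and stress update for an interface node. The kinematics
// are the same as in the fluid variant (L = Fdot * F^{-1}, d = sym(L)), but
// the coefficients come from a hypoelastic law: dt*lambda and dt*G built
// from YOUNG_MODULUS and POISSON_RATIO, and the stress increment is added to
// the previous step's value when the node is flagged SOLID, giving a
// rate-type update sigma^{n+1} = sigma^n + dt*(lambda*tr(d)*I + 2*G*d).
// A Papanastasiou correction of the deviatoric coefficient is also applied
// here when YIELD_SHEAR > 0.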
void CalcNodalStrainsAndStressesForInterfaceSolidNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
double currFirstLame = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio));
double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
//spatial velocity gradient tensor: L = Fdot * F^{-1}, i.e. L_ij = Fdot_ik * invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
if (itNode->Is(SOLID))
{
nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
}
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy;
}
else if (dimension == 3)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
if (itNode->Is(SOLID))
{
nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
nodalSigmaTot_zz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[3];
nodalSigmaTot_xz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[4];
nodalSigmaTot_yz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[5];
nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
nodalSigmaDev_zz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[3];
nodalSigmaDev_xz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[4];
nodalSigmaDev_yz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[5];
}
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_zz;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[3] = nodalSigmaTot_xy;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[4] = nodalSigmaTot_xz;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[5] = nodalSigmaTot_yz;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_zz;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[3] = nodalSigmaDev_xy;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[4] = nodalSigmaDev_xz;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[5] = nodalSigmaDev_yz;
}
}
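// Strain and stress update for a non-interface solid node; the computation
// mirrors CalcNodalStrainsAndStressesForInterfaceSolidNode() but works on
// the deformation gradient assembled with the pure solid stencil.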
void CalcNodalStrainsAndStressesForSolidNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
double currFirstLame = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio));
double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
//spatial velocity gradient tensor: L = Fdot * F^{-1}, i.e. L_ij = Fdot_ik * invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
if (itNode->Is(SOLID))
{
nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
}
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy;
}
else if (dimension == 3)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
if (itNode->Is(SOLID))
{
nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
nodalSigmaTot_zz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[3];
nodalSigmaTot_xz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[4];
nodalSigmaTot_yz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[5];
nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
nodalSigmaDev_zz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[3];
nodalSigmaDev_xz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[4];
nodalSigmaDev_yz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[5];
}
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_zz;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[3] = nodalSigmaTot_xy;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[4] = nodalSigmaTot_xz;
itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[5] = nodalSigmaTot_yz;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_zz;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[3] = nodalSigmaDev_xy;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[4] = nodalSigmaDev_xz;
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[5] = nodalSigmaDev_yz;
}
}
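// Strain-only update for a solid node: computes the spatial deformation
// rate d, the equivalent strain rate sqrt(2*d:d) and the volumetric
// deformation rate tr(d) from the stored solid deformation gradients,
// without touching the stresses.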
void CalcNodalStrainsForSolidNode(ModelPart::NodeIterator itNode)
{
/* std::cout << "Calc Nodal Strains " << std::endl; */
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
double detFgrad = 1.0;
Matrix nodalFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
//Inverse
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(nodalFgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(nodalFgrad, InvFgrad, detFgrad);
}
//spatial velocity gradient tensor: L = Fdot * F^{-1}, i.e. L_ij = Fdot_ik * invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double DefVol = DefX + DefY;
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
}
else if (dimension == 3)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double DefZ = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double DefVol = DefX + DefY + DefZ;
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
}
}
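// Strain-only update for the solid side of an interface node; same
// kinematics as CalcNodalStrainsForSolidNode().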
void CalcNodalStrainsForInterfaceSolidNode(ModelPart::NodeIterator itNode)
{
/* std::cout << "Calc Nodal Strains " << std::endl; */
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
//Inverse
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
//spatial velocity gradient tensor: L = Fdot * F^{-1}, i.e. L_ij = Fdot_ik * invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double DefVol = DefX + DefY;
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
}
else if (dimension == 3)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double DefZ = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double DefVol = DefX + DefY + DefZ;
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
}
/* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}
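// Strain-only counterpart of CalcNodalStrainsAndStresses(): loops over all
// nodes, rebuilds the nodal deformation gradients (theta = 1.0 for every
// node here) and evaluates the strain rates; nodes whose fluid and solid
// nodal volumes are both zero are reinitialized.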
void CalcNodalStrains()
{
/* std::cout << "Calc Nodal Strains " << std::endl; */
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
double solidNodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
double theta = 1.0;
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
if (nodalVolume > 0)
{
//the strains must be computed twice: once for the fluid side and once for the solid side
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
Matrix &interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
Matrix &interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
if (interfaceFgrad.size1() != dimension)
interfaceFgrad.resize(dimension, dimension, false);
if (interfaceFgradVel.size1() != dimension)
interfaceFgradVel.resize(dimension, dimension, false);
noalias(interfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(interfaceFgradVel) = ZeroMatrix(dimension, dimension);
//the following function is more expensive than the general one because it performs an extra loop over the neighbour nodes; this is why it is used here also for fluid interface nodes
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel);
this->CalcNodalStrainsForNode(itNode);
}
if (solidNodalVolume > 0)
{
Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (solidInterfaceFgrad.size1() != dimension)
solidInterfaceFgrad.resize(dimension, dimension, false);
if (solidInterfaceFgradVel.size1() != dimension)
solidInterfaceFgradVel.resize(dimension, dimension, false);
noalias(solidInterfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(solidInterfaceFgradVel) = ZeroMatrix(dimension, dimension);
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel);
CalcNodalStrainsForInterfaceSolidNode(itNode);
}
}
else
{
if (itNode->Is(SOLID) && solidNodalVolume > 0)
{
ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta);
CalcNodalStrainsForSolidNode(itNode);
}
else if (nodalVolume > 0)
{
this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
this->CalcNodalStrainsForNode(itNode);
}
}
if (nodalVolume == 0 && solidNodalVolume == 0)
{ // both the fluid and the solid nodal volumes are zero (e.g. node just created by remeshing)
this->InitializeNodalVariablesForRemeshedDomain(itNode);
InitializeNodalVariablesForSolidRemeshedDomain(itNode);
}
// if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){
// CopyValuesToSolidNonInterfaceNodes(itNode);
// }
}
// }
/* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}
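// Assembles the nodal deformation gradient F and its rate Fdot for a solid
// node from the nodal shape-function derivatives (SFD):
//   F_ij    = sum_a dN_a/dX_j * x_i^a
//   Fdot_ij = sum_a dN_a/dX_j * v_i^a ,
// where the sum runs over the node itself and its neighbours in the order
// stored in SOLID_NODAL_SFD_NEIGHBOURS_ORDER, and the velocity is the
// theta-weighted value v = theta*v^{n+1} + (1-theta)*v^n. A mismatch between
// the stored neighbour IDs and the current NEIGHBOUR_NODES is reported to
// std::cout.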
void ComputeAndStoreNodalDeformationGradientForSolidNode(ModelPart::NodeIterator itNode, double theta)
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
const unsigned int neighSize = nodalSFDneighboursId.size();
Matrix Fgrad = ZeroMatrix(dimension, dimension);
Matrix FgradVel = ZeroMatrix(dimension, dimension);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
if (dimension == 2)
{
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
unsigned int firstRow = 2;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++) // neighb_nodes has one entry fewer than nodalSFDneighboursId because the latter also stores the ID of the considered node at the beginning
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
unsigned int neigh_nodes_id = neighb_nodes[i].Id();
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
if (neigh_nodes_id != other_neigh_nodes_id)
{
std::cout << "node (x,y)=(" << itNode->X() << "," << itNode->Y() << ") with neigh_nodes_id " << neigh_nodes_id << " different than other_neigh_nodes_id " << other_neigh_nodes_id << std::endl;
}
Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
firstRow += 2;
}
}
}
else
{
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
double dNdZi = rNodalSFDneigh[2];
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(0, 2) += dNdZi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
Fgrad(1, 2) += dNdZi * itNode->Y();
Fgrad(2, 0) += dNdXi * itNode->Z();
Fgrad(2, 1) += dNdYi * itNode->Z();
Fgrad(2, 2) += dNdZi * itNode->Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
unsigned int firstRow = 3;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++)
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
dNdZi = rNodalSFDneigh[firstRow + 2];
VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
VelocityZ = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
Fgrad(0, 2) += dNdZi * neighb_nodes[i].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
Fgrad(1, 2) += dNdZi * neighb_nodes[i].Y();
Fgrad(2, 0) += dNdXi * neighb_nodes[i].Z();
Fgrad(2, 1) += dNdYi * neighb_nodes[i].Z();
Fgrad(2, 2) += dNdZi * neighb_nodes[i].Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
firstRow += 3;
}
}
}
itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD) = Fgrad;
itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL) = FgradVel;
KRATOS_CATCH("");
}
void ComputeAndStoreNodalDeformationGradientForInterfaceNode(ModelPart::NodeIterator itNode, Vector nodalSFDneighboursId, Vector rNodalSFDneigh, double theta, Matrix &Fgrad, Matrix &FgradVel)
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
/* unsigned int idThisNode=nodalSFDneighboursId[0]; */
const unsigned int neighSize = nodalSFDneighboursId.size();
noalias(Fgrad) = ZeroMatrix(dimension, dimension);
noalias(FgradVel) = ZeroMatrix(dimension, dimension);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
const unsigned int neighNodesSize = neighb_nodes.size();
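// For interface nodes the NEIGHBOUR_NODES list and the ordering stored in
// nodalSFDneighboursId may differ, so each SFD entry is matched to its
// neighbour node by ID with an inner search loop. This extra loop is what
// makes this variant more expensive than the general one.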
if (dimension == 2)
{
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
unsigned int firstRow = 2;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++) // neighb_nodes has one entry fewer than nodalSFDneighboursId because the latter also stores the ID of the considered node at the beginning
{
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
for (unsigned int k = 0; k < neighNodesSize; k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
if (neigh_nodes_id == other_neigh_nodes_id)
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
Fgrad(0, 0) += dNdXi * neighb_nodes[k].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[k].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[k].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[k].Y();
VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
firstRow += 2;
break;
}
}
}
}
}
else
{
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
double dNdZi = rNodalSFDneigh[2];
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(0, 2) += dNdZi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
Fgrad(1, 2) += dNdZi * itNode->Y();
Fgrad(2, 0) += dNdXi * itNode->Z();
Fgrad(2, 1) += dNdYi * itNode->Z();
Fgrad(2, 2) += dNdZi * itNode->Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
unsigned int firstRow = 3;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++)
{
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
for (unsigned int k = 0; k < neighNodesSize; k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
if (neigh_nodes_id == other_neigh_nodes_id)
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
dNdZi = rNodalSFDneigh[firstRow + 2];
VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
VelocityZ = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * neighb_nodes[k].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[k].X();
Fgrad(0, 2) += dNdZi * neighb_nodes[k].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[k].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[k].Y();
Fgrad(1, 2) += dNdZi * neighb_nodes[k].Y();
Fgrad(2, 0) += dNdXi * neighb_nodes[k].Z();
Fgrad(2, 1) += dNdYi * neighb_nodes[k].Z();
Fgrad(2, 2) += dNdZi * neighb_nodes[k].Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
firstRow += 3;
break;
}
}
}
}
}
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=Fgrad;
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=FgradVel;
KRATOS_CATCH("");
}
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
KRATOS_TRY;
// std::cout<<" UpdateTopology ..."<<std::endl;
/* this->CalculateDisplacements(); */
CalculateDisplacementsAndResetNodalVariables();
BaseType::MoveMesh();
BoundaryNormalsCalculationUtilities BoundaryComputation;
BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel);
// std::cout<<" UpdateTopology DONE"<<std::endl;
KRATOS_CATCH("");
}
void CalculateDisplacementsAndResetNodalVariables()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
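// sizeStrains = 3 in 2D and 6 in 3D: the number of independent components of
// the symmetric strain-rate tensor in Voigt notation.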
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator i = NodesBegin; i != NodesEnd; ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
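// Trapezoidal-rule displacement update: d^{n+1} = d^n + 0.5 * dt * (v^{n+1} + v^n)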
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
if (dimension == 3)
{
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
}
///// reset Nodal variables //////
Vector &rNodalSFDneighbours = i->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
unsigned int sizeSDFNeigh = rNodalSFDneighbours.size();
// unsigned int neighbourNodes=i->GetValue(NEIGHBOUR_NODES).size()+1;
// unsigned int sizeSDFNeigh=neighbourNodes*dimension;
i->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
i->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
i->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
i->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0;
i->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;
noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
Vector &rSpatialDefRate = i->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
Matrix &rFgrad = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
noalias(rFgrad) = ZeroMatrix(dimension, dimension);
Matrix &rFgradVel = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
// if(i->FastGetSolutionStepValue(INTERFACE_NODE)==true){
Vector &rSolidNodalSFDneighbours = i->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
unsigned int solidSizeSDFNeigh = rSolidNodalSFDneighbours.size();
// unsigned int solidSizeSDFNeigh=solidNeighbourNodes*dimension;
i->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = 0;
noalias(rSolidNodalSFDneighbours) = ZeroVector(solidSizeSDFNeigh);
Vector &rSolidSpatialDefRate = i->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
Matrix &rSolidFgrad = i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
Matrix &rSolidFgradVel = i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
// }
}
// }
}
/// Turn back information as a string.
std::string Info() const override
{
std::stringstream buffer;
buffer << "NodalTwoStepVPStrategyForFSI";
return buffer.str();
}
/// Print information about this object.
void PrintInfo(std::ostream &rOStream) const override
{
rOStream << "NodalTwoStepVPStrategyForFSI";
}
// /// Print object's data.
// void PrintData(std::ostream& rOStream) const override
// {
// }
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
NodalTwoStepVPStrategyForFSI &operator=(NodalTwoStepVPStrategyForFSI const &rOther) { return *this; }
/// Copy constructor.
NodalTwoStepVPStrategyForFSI(NodalTwoStepVPStrategyForFSI const &rOther) {}
///@}
}; /// Class NodalTwoStepVPStrategyForFSI
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
|
omp_bug1_fix.c | /******************************************************************************
* FILE: omp_bug1_fix.c
* DESCRIPTION:
* This is a corrected version of the omp_bug1.c example. The original code
* applied a combined "parallel for" construct inside an already parallel
* region; the fix uses the "omp for" worksharing construct instead.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50
#define CHUNKSIZE 10
int main (int argc, char *argv[])
{
int i, chunk, tid;
float a[N], b[N], c[N];
/* Some initializations */
for (i=0; i < N; i++)
a[i] = b[i] = i * 1.0;
chunk = CHUNKSIZE;
#pragma omp parallel shared(a,b,c,chunk) private(i,tid)
{
tid = omp_get_thread_num();
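/* Worksharing construct: the iterations of the following loop are divided
among the threads of the enclosing parallel region. A nested
"#pragma omp parallel for" here would instead spawn a new team of threads
inside each existing thread. */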
#pragma omp for schedule(static,chunk)
for (i=0; i < N; i++)
{
c[i] = a[i] + b[i];
printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
}
/* end of omp for worksharing construct */
}
return 0;
}
|
2mm.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "2mm.h"
/* Array initialization. */
static
void init_array( int ni, int nj, int nk, int nl,
DATA_TYPE *alpha,
DATA_TYPE *beta,
DATA_TYPE POLYBENCH_2D( A, NI, NK, ni, nk ),
DATA_TYPE POLYBENCH_2D( B, NK, NJ, nk, nj ),
DATA_TYPE POLYBENCH_2D( C, NL, NJ, nl, nj ),
DATA_TYPE POLYBENCH_2D( D, NI, NL, ni, nl ) )
{
int i, j;
*alpha = 32412;
*beta = 2123;
for ( i = 0; i < ni; i++ )
for ( j = 0; j < nk; j++ ) {
A[i][j] = ( ( DATA_TYPE ) i * j ) / ni;
}
for ( i = 0; i < nk; i++ )
for ( j = 0; j < nj; j++ ) {
B[i][j] = ( ( DATA_TYPE ) i * ( j + 1 ) ) / nj;
}
for ( i = 0; i < nl; i++ )
for ( j = 0; j < nj; j++ ) {
C[i][j] = ( ( DATA_TYPE ) i * ( j + 3 ) ) / nl;
}
for ( i = 0; i < ni; i++ )
for ( j = 0; j < nl; j++ ) {
D[i][j] = ( ( DATA_TYPE ) i * ( j + 2 ) ) / nk;
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array( int ni, int nl,
DATA_TYPE POLYBENCH_2D( D, NI, NL, ni, nl ) )
{
int i, j;
for ( i = 0; i < ni; i++ )
for ( j = 0; j < nl; j++ ) {
fprintf ( stderr, DATA_PRINTF_MODIFIER, D[i][j] );
if ( ( i * ni + j ) % 20 == 0 ) {
fprintf ( stderr, "\n" );
}
}
fprintf ( stderr, "\n" );
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
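/* 2mm computes D := beta*D + (alpha*A*B)*C via the temporary tmp = alpha*A*B.
Both loop nests below are parallelized over rows of their output; j and k
must be private so each thread runs its own inner loops. */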
static
void kernel_2mm( int ni, int nj, int nk, int nl,
DATA_TYPE alpha,
DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D( tmp, NI, NJ, ni, nj ),
DATA_TYPE POLYBENCH_2D( A, NI, NK, ni, nk ),
DATA_TYPE POLYBENCH_2D( B, NK, NJ, nk, nj ),
DATA_TYPE POLYBENCH_2D( C, NL, NJ, nl, nj ),
DATA_TYPE POLYBENCH_2D( D, NI, NL, ni, nl ) )
{
int i, j, k;
//~ #pragma omp parallel
//~ {
#pragma omp parallel for private (j, k)
for ( i = 0; i < _PB_NI; i++ )
for ( j = 0; j < _PB_NJ; j++ ) {
tmp[i][j] = 0;
for ( k = 0; k < _PB_NK; ++k ) {
tmp[i][j] += alpha * A[i][k] * B[k][j];
}
}
#pragma omp parallel for private (j, k)
for ( i = 0; i < _PB_NI; i++ )
for ( j = 0; j < _PB_NL; j++ ) {
D[i][j] *= beta;
for ( k = 0; k < _PB_NJ; ++k ) {
D[i][j] += tmp[i][k] * C[k][j];
}
}
//~ }
}
static
void *
xmalloc ( size_t num )
{
void* new = NULL;
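/* Request 32-byte alignment (presumably so the arrays can be used with
aligned 256-bit vector loads). */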
int ret = posix_memalign ( &new, 32, num );
if ( ! new || ret ) {
fprintf ( stderr, "[PolyBench] posix_memalign: cannot allocate memory\n" );
exit ( 1 );
}
return new;
}
void *polybench_alloc_data( unsigned long long int n, int elt_size )
{
/* FIXME: detect overflow! */
size_t val = n;
void * ret;
val *= elt_size;
ret = xmalloc( val );
return ret;
}
int main( int argc, char** argv )
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
/* Variable declaration/allocation. */
DATA_TYPE alpha;
DATA_TYPE beta;
POLYBENCH_2D_ARRAY_DECL( tmp, DATA_TYPE, NI, NJ, ni, nj );
POLYBENCH_2D_ARRAY_DECL( A, DATA_TYPE, NI, NK, ni, nk );
POLYBENCH_2D_ARRAY_DECL( B, DATA_TYPE, NK, NJ, nk, nj );
POLYBENCH_2D_ARRAY_DECL( C, DATA_TYPE, NL, NJ, nl, nj );
POLYBENCH_2D_ARRAY_DECL( D, DATA_TYPE, NI, NL, ni, nl );
POLYBENCH_2D_ARRAY_ALLOC( tmp, DATA_TYPE, NI, NJ, ni, nj );
POLYBENCH_2D_ARRAY_ALLOC( A, DATA_TYPE, NI, NK, ni, nk );
POLYBENCH_2D_ARRAY_ALLOC( B, DATA_TYPE, NK, NJ, nk, nj );
POLYBENCH_2D_ARRAY_ALLOC( C, DATA_TYPE, NL, NJ, nl, nj );
POLYBENCH_2D_ARRAY_ALLOC( D, DATA_TYPE, NI, NL, ni, nl );
/* Initialize array(s). */
init_array ( ni, nj, nk, nl, &alpha, &beta,
POLYBENCH_ARRAY( A ),
POLYBENCH_ARRAY( B ),
POLYBENCH_ARRAY( C ),
POLYBENCH_ARRAY( D ) );
/* Run kernel. */
kernel_2mm ( ni, nj, nk, nl,
alpha, beta,
POLYBENCH_ARRAY( tmp ),
POLYBENCH_ARRAY( A ),
POLYBENCH_ARRAY( B ),
POLYBENCH_ARRAY( C ),
POLYBENCH_ARRAY( D ) );
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce( print_array( ni, nl, POLYBENCH_ARRAY( D ) ) );
/* Be clean. */
POLYBENCH_FREE_ARRAY( tmp );
POLYBENCH_FREE_ARRAY( A );
POLYBENCH_FREE_ARRAY( B );
POLYBENCH_FREE_ARRAY( C );
POLYBENCH_FREE_ARRAY( D );
return 0;
}
|
GB_unop__minv_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_uint16_uint16)
// op(A') function: GB (_unop_tran__minv_uint16_uint16)
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 16) ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = aij ; \
Cx [pC] = GB_IMINV_UNSIGNED (z, 16) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__minv_uint16_uint16)
(
uint16_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = GB_IMINV_UNSIGNED (z, 16) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = GB_IMINV_UNSIGNED (z, 16) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__minv_uint16_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolutiondepthwise_5x5_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw5x5s1_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
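// Depthwise 5x5 convolution, stride 1, pack-4 layout: one __m128 holds the 4
// packed channel lanes of a pixel. Groups (channels) are independent, so the
// loop over g parallelizes safely. The first inner loop below produces two
// output rows per iteration (outptr0/outptr1) so that the shared input rows
// r1..r4 are loaded once and reused by both rows.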
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
__m128 _bias0 = bias ? _mm_loadu_ps(bias + g * 4) : _mm_setzero_ps();
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
float* outptr1 = out.row(1);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
const float* r5 = img0.row(5);
int i = 0;
for (; i + 1 < outh; i += 2)
{
int j = 0;
for (; j < outw; j++)
{
__m128 _sum0 = _bias0;
__m128 _sum1 = _bias0;
__m128 _r00 = _mm_load_ps(r0);
__m128 _r01 = _mm_load_ps(r0 + 4);
__m128 _r02 = _mm_load_ps(r0 + 4 * 2);
__m128 _r03 = _mm_load_ps(r0 + 4 * 3);
__m128 _r04 = _mm_load_ps(r0 + 4 * 4);
__m128 _k00 = _mm_load_ps(k0);
__m128 _k01 = _mm_load_ps(k0 + 4);
__m128 _k02 = _mm_load_ps(k0 + 4 * 2);
__m128 _k03 = _mm_load_ps(k0 + 4 * 3);
__m128 _k04 = _mm_load_ps(k0 + 4 * 4);
k0 += 4 * 5;
_sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k03, _r03, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k04, _r04, _sum0);
__m128 _r10 = _mm_load_ps(r1);
__m128 _r11 = _mm_load_ps(r1 + 4);
__m128 _r12 = _mm_load_ps(r1 + 4 * 2);
__m128 _r13 = _mm_load_ps(r1 + 4 * 3);
__m128 _r14 = _mm_load_ps(r1 + 4 * 4);
_sum1 = _mm_comp_fmadd_ps(_k00, _r10, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k01, _r11, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k02, _r12, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k03, _r13, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k04, _r14, _sum1);
__m128 _k10 = _mm_load_ps(k0);
__m128 _k11 = _mm_load_ps(k0 + 4);
__m128 _k12 = _mm_load_ps(k0 + 4 * 2);
__m128 _k13 = _mm_load_ps(k0 + 4 * 3);
__m128 _k14 = _mm_load_ps(k0 + 4 * 4);
k0 += 4 * 5;
_sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k13, _r13, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k14, _r14, _sum0);
__m128 _r20 = _mm_load_ps(r2);
__m128 _r21 = _mm_load_ps(r2 + 4);
__m128 _r22 = _mm_load_ps(r2 + 4 * 2);
__m128 _r23 = _mm_load_ps(r2 + 4 * 3);
__m128 _r24 = _mm_load_ps(r2 + 4 * 4);
_sum1 = _mm_comp_fmadd_ps(_k10, _r20, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k11, _r21, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k12, _r22, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k13, _r23, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k14, _r24, _sum1);
__m128 _k20 = _mm_load_ps(k0);
__m128 _k21 = _mm_load_ps(k0 + 4);
__m128 _k22 = _mm_load_ps(k0 + 4 * 2);
__m128 _k23 = _mm_load_ps(k0 + 4 * 3);
__m128 _k24 = _mm_load_ps(k0 + 4 * 4);
k0 += 4 * 5;
_sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k23, _r23, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k24, _r24, _sum0);
__m128 _r30 = _mm_load_ps(r3);
__m128 _r31 = _mm_load_ps(r3 + 4);
__m128 _r32 = _mm_load_ps(r3 + 4 * 2);
__m128 _r33 = _mm_load_ps(r3 + 4 * 3);
__m128 _r34 = _mm_load_ps(r3 + 4 * 4);
_sum1 = _mm_comp_fmadd_ps(_k20, _r30, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k21, _r31, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k22, _r32, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k23, _r33, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k24, _r34, _sum1);
__m128 _k30 = _mm_load_ps(k0);
__m128 _k31 = _mm_load_ps(k0 + 4);
__m128 _k32 = _mm_load_ps(k0 + 4 * 2);
__m128 _k33 = _mm_load_ps(k0 + 4 * 3);
__m128 _k34 = _mm_load_ps(k0 + 4 * 4);
k0 += 4 * 5;
_sum0 = _mm_comp_fmadd_ps(_k30, _r30, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k31, _r31, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k32, _r32, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k33, _r33, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k34, _r34, _sum0);
__m128 _r40 = _mm_load_ps(r4);
__m128 _r41 = _mm_load_ps(r4 + 4);
__m128 _r42 = _mm_load_ps(r4 + 4 * 2);
__m128 _r43 = _mm_load_ps(r4 + 4 * 3);
__m128 _r44 = _mm_load_ps(r4 + 4 * 4);
_sum1 = _mm_comp_fmadd_ps(_k30, _r40, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k31, _r41, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k32, _r42, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k33, _r43, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k34, _r44, _sum1);
__m128 _k40 = _mm_load_ps(k0);
__m128 _k41 = _mm_load_ps(k0 + 4);
__m128 _k42 = _mm_load_ps(k0 + 4 * 2);
__m128 _k43 = _mm_load_ps(k0 + 4 * 3);
__m128 _k44 = _mm_load_ps(k0 + 4 * 4);
k0 -= 4 * 20;
_sum0 = _mm_comp_fmadd_ps(_k40, _r40, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k41, _r41, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k42, _r42, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k43, _r43, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k44, _r44, _sum0);
__m128 _r50 = _mm_load_ps(r5);
__m128 _r51 = _mm_load_ps(r5 + 4);
__m128 _r52 = _mm_load_ps(r5 + 4 * 2);
__m128 _r53 = _mm_load_ps(r5 + 4 * 3);
__m128 _r54 = _mm_load_ps(r5 + 4 * 4);
_sum1 = _mm_comp_fmadd_ps(_k40, _r50, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k41, _r51, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k42, _r52, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k43, _r53, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k44, _r54, _sum1);
_mm_store_ps(outptr0, _sum0);
_mm_store_ps(outptr1, _sum1);
outptr0 += 4;
outptr1 += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
r0 += 4 * 4 + w * 4;
r1 += 4 * 4 + w * 4;
r2 += 4 * 4 + w * 4;
r3 += 4 * 4 + w * 4;
r4 += 4 * 4 + w * 4;
r5 += 4 * 4 + w * 4;
outptr0 += outw * 4;
outptr1 += outw * 4;
}
for (; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
__m128 _sum0 = _bias0;
__m128 _r00 = _mm_load_ps(r0);
__m128 _r01 = _mm_load_ps(r0 + 4);
__m128 _r02 = _mm_load_ps(r0 + 4 * 2);
__m128 _r03 = _mm_load_ps(r0 + 4 * 3);
__m128 _r04 = _mm_load_ps(r0 + 4 * 4);
__m128 _k00 = _mm_load_ps(k0);
__m128 _k01 = _mm_load_ps(k0 + 4);
__m128 _k02 = _mm_load_ps(k0 + 4 * 2);
__m128 _k03 = _mm_load_ps(k0 + 4 * 3);
__m128 _k04 = _mm_load_ps(k0 + 4 * 4);
k0 += 4 * 5;
_sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k03, _r03, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k04, _r04, _sum0);
__m128 _r10 = _mm_load_ps(r1);
__m128 _r11 = _mm_load_ps(r1 + 4);
__m128 _r12 = _mm_load_ps(r1 + 4 * 2);
__m128 _r13 = _mm_load_ps(r1 + 4 * 3);
__m128 _r14 = _mm_load_ps(r1 + 4 * 4);
__m128 _k10 = _mm_load_ps(k0);
__m128 _k11 = _mm_load_ps(k0 + 4);
__m128 _k12 = _mm_load_ps(k0 + 4 * 2);
__m128 _k13 = _mm_load_ps(k0 + 4 * 3);
__m128 _k14 = _mm_load_ps(k0 + 4 * 4);
k0 += 4 * 5;
_sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k13, _r13, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k14, _r14, _sum0);
__m128 _r20 = _mm_load_ps(r2);
__m128 _r21 = _mm_load_ps(r2 + 4);
__m128 _r22 = _mm_load_ps(r2 + 4 * 2);
__m128 _r23 = _mm_load_ps(r2 + 4 * 3);
__m128 _r24 = _mm_load_ps(r2 + 4 * 4);
__m128 _k20 = _mm_load_ps(k0);
__m128 _k21 = _mm_load_ps(k0 + 4);
__m128 _k22 = _mm_load_ps(k0 + 4 * 2);
__m128 _k23 = _mm_load_ps(k0 + 4 * 3);
__m128 _k24 = _mm_load_ps(k0 + 4 * 4);
k0 += 4 * 5;
_sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k23, _r23, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k24, _r24, _sum0);
__m128 _r30 = _mm_load_ps(r3);
__m128 _r31 = _mm_load_ps(r3 + 4);
__m128 _r32 = _mm_load_ps(r3 + 4 * 2);
__m128 _r33 = _mm_load_ps(r3 + 4 * 3);
__m128 _r34 = _mm_load_ps(r3 + 4 * 4);
__m128 _k30 = _mm_load_ps(k0);
__m128 _k31 = _mm_load_ps(k0 + 4);
__m128 _k32 = _mm_load_ps(k0 + 4 * 2);
__m128 _k33 = _mm_load_ps(k0 + 4 * 3);
__m128 _k34 = _mm_load_ps(k0 + 4 * 4);
k0 += 4 * 5;
_sum0 = _mm_comp_fmadd_ps(_k30, _r30, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k31, _r31, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k32, _r32, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k33, _r33, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k34, _r34, _sum0);
__m128 _r40 = _mm_load_ps(r4);
__m128 _r41 = _mm_load_ps(r4 + 4);
__m128 _r42 = _mm_load_ps(r4 + 4 * 2);
__m128 _r43 = _mm_load_ps(r4 + 4 * 3);
__m128 _r44 = _mm_load_ps(r4 + 4 * 4);
__m128 _k40 = _mm_load_ps(k0);
__m128 _k41 = _mm_load_ps(k0 + 4);
__m128 _k42 = _mm_load_ps(k0 + 4 * 2);
__m128 _k43 = _mm_load_ps(k0 + 4 * 3);
__m128 _k44 = _mm_load_ps(k0 + 4 * 4);
k0 -= 4 * 20;
_sum0 = _mm_comp_fmadd_ps(_k40, _r40, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k41, _r41, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k42, _r42, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k43, _r43, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k44, _r44, _sum0);
_mm_store_ps(outptr0, _sum0);
outptr0 += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
}
r0 += 4 * 4;
r1 += 4 * 4;
r2 += 4 * 4;
r3 += 4 * 4;
r4 += 4 * 4;
}
}
}
static void convdw5x5s2_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = (w - 2 * outw + w) * 4;
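// tailstep: skip the unconsumed tail of the current input row plus one whole
// row (vertical stride 2), measured in floats (4 lanes per packed pixel).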
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
__m128 _bias0 = bias ? _mm_loadu_ps(bias + g * 4) : _mm_setzero_ps();
const float* k0 = kernel.row(g);
float* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
__m128 _sum0 = _bias0;
__m128 _r00 = _mm_load_ps(r0);
__m128 _r01 = _mm_load_ps(r0 + 4);
__m128 _r02 = _mm_load_ps(r0 + 4 * 2);
__m128 _r03 = _mm_load_ps(r0 + 4 * 3);
__m128 _r04 = _mm_load_ps(r0 + 4 * 4);
__m128 _k00 = _mm_load_ps(k0);
__m128 _k01 = _mm_load_ps(k0 + 4);
__m128 _k02 = _mm_load_ps(k0 + 4 * 2);
__m128 _k03 = _mm_load_ps(k0 + 4 * 3);
__m128 _k04 = _mm_load_ps(k0 + 4 * 4);
k0 += 4 * 5;
_sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k03, _r03, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k04, _r04, _sum0);
__m128 _r10 = _mm_load_ps(r1);
__m128 _r11 = _mm_load_ps(r1 + 4);
__m128 _r12 = _mm_load_ps(r1 + 4 * 2);
__m128 _r13 = _mm_load_ps(r1 + 4 * 3);
__m128 _r14 = _mm_load_ps(r1 + 4 * 4);
__m128 _k10 = _mm_load_ps(k0);
__m128 _k11 = _mm_load_ps(k0 + 4);
__m128 _k12 = _mm_load_ps(k0 + 4 * 2);
__m128 _k13 = _mm_load_ps(k0 + 4 * 3);
__m128 _k14 = _mm_load_ps(k0 + 4 * 4);
k0 += 4 * 5;
_sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k13, _r13, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k14, _r14, _sum0);
__m128 _r20 = _mm_load_ps(r2);
__m128 _r21 = _mm_load_ps(r2 + 4);
__m128 _r22 = _mm_load_ps(r2 + 4 * 2);
__m128 _r23 = _mm_load_ps(r2 + 4 * 3);
__m128 _r24 = _mm_load_ps(r2 + 4 * 4);
__m128 _k20 = _mm_load_ps(k0);
__m128 _k21 = _mm_load_ps(k0 + 4);
__m128 _k22 = _mm_load_ps(k0 + 4 * 2);
__m128 _k23 = _mm_load_ps(k0 + 4 * 3);
__m128 _k24 = _mm_load_ps(k0 + 4 * 4);
k0 += 4 * 5;
_sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k23, _r23, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k24, _r24, _sum0);
__m128 _r30 = _mm_load_ps(r3);
__m128 _r31 = _mm_load_ps(r3 + 4);
__m128 _r32 = _mm_load_ps(r3 + 4 * 2);
__m128 _r33 = _mm_load_ps(r3 + 4 * 3);
__m128 _r34 = _mm_load_ps(r3 + 4 * 4);
__m128 _k30 = _mm_load_ps(k0);
__m128 _k31 = _mm_load_ps(k0 + 4);
__m128 _k32 = _mm_load_ps(k0 + 4 * 2);
__m128 _k33 = _mm_load_ps(k0 + 4 * 3);
__m128 _k34 = _mm_load_ps(k0 + 4 * 4);
k0 += 4 * 5;
_sum0 = _mm_comp_fmadd_ps(_k30, _r30, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k31, _r31, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k32, _r32, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k33, _r33, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k34, _r34, _sum0);
__m128 _r40 = _mm_load_ps(r4);
__m128 _r41 = _mm_load_ps(r4 + 4);
__m128 _r42 = _mm_load_ps(r4 + 4 * 2);
__m128 _r43 = _mm_load_ps(r4 + 4 * 3);
__m128 _r44 = _mm_load_ps(r4 + 4 * 4);
__m128 _k40 = _mm_load_ps(k0);
__m128 _k41 = _mm_load_ps(k0 + 4);
__m128 _k42 = _mm_load_ps(k0 + 4 * 2);
__m128 _k43 = _mm_load_ps(k0 + 4 * 3);
__m128 _k44 = _mm_load_ps(k0 + 4 * 4);
k0 -= 4 * 20;
_sum0 = _mm_comp_fmadd_ps(_k40, _r40, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k41, _r41, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k42, _r42, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k43, _r43, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k44, _r44, _sum0);
_mm_store_ps(outptr0, _sum0);
outptr0 += 4;
r0 += 4 * 2;
r1 += 4 * 2;
r2 += 4 * 2;
r3 += 4 * 2;
r4 += 4 * 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
GB_unaryop__ainv_int16_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int16_int8
// op(A') function: GB_tran__ainv_int16_int8
// C type: int16_t
// A type: int8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int16_int8
(
int16_t *Cx, // Cx and Ax may be aliased
int8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_int16_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
AssemblerParallel.h | //
// AssemblerParallel.h
// Gauss
//
// Created by David Levin on 6/6/17.
//
//
#ifndef AssemblerParallel_h
#define AssemblerParallel_h
#ifdef GAUSS_OPENMP
#include <omp.h>
#include <Assembler.h>
#include <CoreDefines.h>
#include <Utilities.h>
#include <UtilitiesOMP.h>
#include <World.h>
namespace Gauss {
//A parallel assembler just uses one serial assembler per available thread
template<typename SerialAssembler>
class AssemblerParallelImpl : public AssemblerBase {
public:
using MatrixType = typename SerialAssembler::MatrixType;
AssemblerParallelImpl() {
//Number of available threads
std::cout<<"Number of Available Threads: "<<omp_thread_count()<<"\n";
m_serialAssemblers.resize(omp_thread_count());
}
inline void init(unsigned int m, unsigned int n=1, unsigned int rowOffset = 0, unsigned int colOffset = 0) {
//do everything in parallel
m_assembled.resize(m,n); // TODO need conditional to deal with case where MatrixType is a vector
m_assembled.setZero();
for(unsigned int ii=0; ii < m_serialAssemblers.size(); ++ii) {
m_serialAssemblers[ii].init(m,n, rowOffset, colOffset);
}
}
inline void finalize() {
//build giant triplets list and set it up
//I think I want to assemble separately then add (split up my setTriplets time)
#pragma omp parallel
{
#pragma omp for
for(unsigned int ii=0; ii < m_serialAssemblers.size(); ++ii) {
m_serialAssemblers[ii].finalize();
}
}
for(unsigned int ii=0; ii<m_serialAssemblers.size(); ++ii) {
m_assembled += (*m_serialAssemblers[ii]);
}
}
//convenient overloads
inline auto & getMatrix() {
return m_assembled;
}
template<typename I, typename J, typename Input>
inline void assemble(I &i, J &j, Input &toAssembler) {
m_serialAssemblers[0].getImpl().assemble(i,j, toAssembler); //default single threaded behavior
//exit(1);
}
template<typename I, typename Input>
inline void assemble(I &i, Input &toAssembler) {
m_serialAssemblers[0].getImpl().assemble(i, toAssembler); //default single threaded behavior
//exit(1);
}
//next step, this needs to change to take in a list of i's, j's and sizes
//take in std::vectors for indices and size
inline void setOffset(unsigned int rowOffset, unsigned int colOffset = 0) {
AssemblerBase::setOffset(rowOffset, colOffset);
#pragma omp parallel
{
#pragma omp for
for(unsigned int ii=0; ii < m_serialAssemblers.size(); ++ii) {
m_serialAssemblers[ii].setOffset(rowOffset, colOffset);
}
}
}
inline SerialAssembler & operator[](unsigned int threadId) {
return m_serialAssemblers[threadId];
}
SerialAssembler & getAssembler(unsigned int threadId) {
return m_serialAssemblers[threadId];
}
//For MVP assemblers
inline void setX(MatrixType &x) {
#pragma omp parallel
{
#pragma omp for
for(unsigned int ii=0; ii < m_serialAssemblers.size(); ++ii) {
(m_serialAssemblers[ii].getImpl().setX(x));
}
}
}
//handle operators
template<typename Params>
inline AssemblerParallelImpl & operator*=(Params &x) {
#pragma omp parallel
{
#pragma omp for
for(unsigned int ii=0; ii < m_serialAssemblers.size(); ++ii) {
(m_serialAssemblers[ii].getImpl())*=x;
}
}
return *this;
}
protected:
std::vector<SerialAssembler> m_serialAssemblers;
//At some point I should figure out this type based on the constituent assemblers but for now just assume Eigen
typename SerialAssembler::ImplType::MatrixType m_assembled;
private:
};
template<typename DataType, typename SerialAssembler>
using AssemblerParallel = Assembler<DataType, AssemblerParallelImpl<SerialAssembler> >;
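// Hypothetical usage sketch (MySerialAssembler and the fill step are
// placeholders, not part of this header):
//   AssemblerParallel<double, MySerialAssembler> assembler;
//   assembler.getImpl().init(n, n);
//   // ... each OpenMP thread assembles into assembler.getImpl()[tid] ...
//   assembler.getImpl().finalize(); // merges the per-thread matrices
//   auto &A = assembler.getImpl().getMatrix();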
template<typename DataType, typename SerialAssembler>
struct IsParallel<Assembler<DataType, AssemblerParallelImpl<SerialAssembler> > > {
public:
constexpr static bool value = true;
};
}
#else
template<typename DataType, typename SerialAssembler>
using AssemblerParallel = SerialAssembler;
#endif //OPENMP is Available
#endif /* AssemblerParallel_h */
|
ids.c | #include "globals.h"
/* Set IDs with spacing so an ID domain decomposition is more balanced */
void Make_IDs()
{
printf ( "Make IDs ..." );
fflush ( stdout );
#pragma omp parallel for
for ( int ipart = 0; ipart < Param.Npart; ipart++ ) {
P[ipart].ID = ipart + 1;
}
size_t delta = 127;
for ( ;; ) {
if ( ( Param.Npart % ++delta ) == 0 || delta > Param.Npart ) {
break;
}
}
if ( delta > Param.Npart ) {
delta = 1;
}
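/* At this point delta is the smallest divisor of Npart that is >= 128, or 1
if no such divisor exists. Assigning IDs with stride delta scatters
consecutive IDs across the particle array, which keeps an ID-based domain
decomposition balanced. */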
printf ( " ID spacing is %zu ...", delta );
fflush ( stdout );
if ( delta > 1 ) {
int id = 1 - delta, start = 1;
for ( int ipart = 0; ipart < Param.Npart; ipart++ ) {
id += delta;
if ( id > Param.Npart ) {
start++;
id = start;
}
P[ipart].ID = id;
}
}
printf ( " done\n\n" );
fflush ( stdout );
return;
}
|
GB_binop__bshift_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__bshift_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__bshift_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint16)
// C=scalar+B GB (_bind1st__bshift_uint16)
// C=scalar+B' GB (_bind1st_tran__bshift_uint16)
// C=A+scalar GB (_bind2nd__bshift_uint16)
// C=A'+scalar GB (_bind2nd_tran__bshift_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_bitshift_uint16 (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_uint16 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT16 || GxB_NO_BSHIFT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bshift_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bshift_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bshift_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bshift_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bshift_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bshift_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bshift_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bshift_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bshift_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_bitshift_uint16 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bshift_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_bitshift_uint16 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint16 (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__bshift_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint16 (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__bshift_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__minv_int64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_int64_int64)
// op(A') function: GB (_unop_tran__minv_int64_int64)
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 64) ;
// casting
#define GB_CAST(z, aij) \
int64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 64) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT64)
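// Note: GB_IMINV_SIGNED computes the integer "multiplicative inverse" 1/aij
// in int64 arithmetic, so aij = 1 gives 1, aij = -1 gives -1, and |aij| > 1
// gives 0; the aij == 0 case is special-cased in GB.h rather than performing
// a raw (trapping) division by zero.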
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__minv_int64_int64)
(
int64_t *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 64) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__minv_int64_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main_mini_morus.c | #include "main_mini_morus.h"
void copy_word(state_words* to, const state_words* from)
{
*to = *from;
}
// record the five words of state 'st' into state_saved[i]
void save_state(state* state_saved, state st, int i)
{
copy_word(&state_saved[i][0], &st[0]);
copy_word(&state_saved[i][1], &st[1]);
copy_word(&state_saved[i][2], &st[2]);
copy_word(&state_saved[i][3], &st[3]);
copy_word(&state_saved[i][4], &st[4]);
}
void sample(state* saved_state, state_words* saved_cipher, int num, struct RNG_state* seed)
{
state st;
state_words null_words;
// state_words output;
int i;
rand_init(st, seed);
null_word(&null_words);
for (i = 0; i < num; i++) {
save_state(saved_state, st, i);
encrypt(&saved_cipher[i], null_words, st);
}
save_state(saved_state, st, i);
}
int linear_sample(struct RNG_state* seed)
{
state saved_state[6];
state_words saved_cipher[5];
sample(saved_state, saved_cipher, 5, seed);
return linear(saved_state, saved_cipher);
}
void linear_stats(unsigned long long num) {
long long res = 0;
long long inbalance = 0;
unsigned long long bias = 0;
unsigned long long i = 0;
struct RNG_state* seed;
unsigned int small_seed = rand();
int tid;
printf("--------------------------\n");
printf("num: %llu\n", (num* omp_get_max_threads()));
printf("--------------------------\n");
// omp_set_num_threads(16);
# pragma omp parallel private(i, tid, seed, res) reduction(+:bias,inbalance) // res must be thread-private to avoid a data race
{
tid = omp_get_thread_num();
seed = init_aesrand_r(small_seed,tid);
// printf ( " %6d %12d\n", tid, seed);
// try using openmp to speed things up
// #pragma omp parallel for reduction(+:bias,inbalance)
for(i = 0 ; i < num; ++i)
{
res = linear_sample(seed);
inbalance += 1 - 2*res;
bias += res;
if((i & 0x3ffffff) == 0)
{
printf("sampling, %llu -- thread %d\n", i, tid);
}
}
}
printf("--------------------------\n");
printf("num: %llu\n", (num* omp_get_max_threads()));
printf("inba: %lld\n", inbalance);
// printf("bias: %llu\n", bias);
printf("bias: %f\n", log2(fabs(inbalance)/(num* omp_get_max_threads())));
printf("--------------------------\n");
}
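// Note on the statistic above: res is a 0/1 bit, so (1 - 2*res) maps it to
// +1/-1 and 'inbalance' accumulates N0 - N1 over all samples. The printed
// "bias" is log2(|N0 - N1| / N): the log-scale correlation of the linear
// approximation, which is twice the bias |Pr[bit = 0] - 1/2|.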
int main(int argc, char const *argv[]) {
long long int num = 1;
num <<= 20;
srand(time(NULL));
linear_stats(num);
return 0;
}
|
CPUMatrixImpl.h | //
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
// CPUMatrix.h : template implementation of all matrix functions on the CPU side
//
#pragma once
#include "Basics.h"
#include "File.h"
#include "CPUMatrix.h"
#include "TensorOps.h"
#include <assert.h>
#include <stdexcept>
#include <omp.h>
#include <math.h>
#include <random>
#include <chrono>
#include <exception>
#include <thread>
#include <iostream>
#include <algorithm>
#include <numeric>
#pragma warning(push)
#pragma warning(disable:4244) // 'conversion' conversion from 'type1' to 'type2', possible loss of data
#include <boost/random/normal_distribution.hpp>
#pragma warning(pop)
#include <boost/random/uniform_real_distribution.hpp>
#ifdef _WIN32
#define NOMINMAX
#include "Windows.h"
#else
#include <cfloat>
#endif
#ifdef LEAKDETECT
#include <vld.h>
#endif
#pragma warning(disable : 4100) // unreferenced formal parameter; "struct TensorOpReduction<ElemType, OPFN, typename ReductionOp, N, -1>" triggers this
#pragma warning(disable : 4127) // conditional expression is constant; "if (sizeof(ElemType)==sizeof(float))" triggers this
#pragma warning(disable : 4244) // conversion from 'double' to 'float', possible loss of data
#pragma warning(disable : 4702) // unreachable code; triggered for unknown reasons
#ifdef USE_MKL
// requires MKLML 0.11 and above
#include <mkl_cblas.h>
#include <mkl_lapacke.h>
#include <mkl_service.h>
#else
#ifdef _MSC_VER
// Visual Studio doesn't define standard complex types properly
#define HAVE_LAPACK_CONFIG_H
#define LAPACK_COMPLEX_STRUCTURE
#endif
#include <cblas.h>
#include <lapacke.h>
#endif
#define SWAP(a, b) \
{ \
(a) ^= (b); \
(b) ^= (a); \
(a) ^= (b); \
}
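// Caution: the XOR swap above is only valid for integer types, and it zeroes
// the value if both macro arguments alias the same object (a ^ a == 0).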
#define IDX2C(i, j, ld) (((j) * (ld)) + (i)) // 0 based indexing
namespace Microsoft { namespace MSR { namespace CNTK {
#pragma region Helpful Enum Definitions
enum class MatrixOrder
{
RowMajor = 101, // row-major arrays
ColMajor = 102 // column-major arrays
};
enum class MatrixTranspose : char
{
NoTrans = 'N', // trans='N'
Trans = 'T', // trans='T'
ConjTrans = 'C' // trans='C'
};
enum class SymMatrixType : char
{
Up = 'U', // symmetric matrix is stored in the upper part
Low = 'L', // symmetric matrix is stored in the lower part
Full = 'F', // full populated
NotSymmetric = 'N' // not a symmetric matrix
};
enum class MatrixOpSide : char
{
Left = 'L', // left multiply
Right = 'R', // right multiply
};
#pragma endregion Helpful Enum Definitions
#pragma region Constructors and Destructor
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix()
{
ZeroInit();
}
// helper to allocate an array of ElemType
// Use this instead of new[] to get NaN initialization for debugging.
template <class ElemType>
static ElemType* NewArray(size_t n)
{
// We need to allocate possibly one more element for the following reason.
// At some point we might want to fill a buffer with the result of a random
// number generator. The RNG is oblivious to whether the buffer is on the
// CPU or GPU but it needs to keep an accurate tally of how many numbers it
// has generated. The trouble stems from the fact that generating an odd
// number gaussians on the GPU is not supported so we must always
// generate an even number. So since we wouldn't know how to update the tally
// we are making this allocate one more element in the worst case.
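// (AsMultipleOf(n, 2) below rounds n up to an even element count for exactly
// this reason.)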
ElemType* p = new ElemType[AsMultipleOf(n, 2)]();
#if 0 // _DEBUG
ElemType nan = Matrix<ElemType>::MakeNan(__LINE__);
for (size_t i = 0; i < n; i++)
p[i] = nan;
#endif
return p;
}
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols)
{
ZeroInit();
m_numRows = numRows;
m_numCols = numCols;
SetSizeAllocated(GetNumElements());
if (GetNumElements() != 0)
{
SetBuffer(NewArray<ElemType>(GetNumElements()), GetNumElements() * sizeof(ElemType));
}
}
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags)
{
ZeroInit();
SetValue(numRows, numCols, pArray, matrixFlags);
}
//copy constructor, deep copy
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& deepCopyFrom)
{
ZeroInit();
SetValue(deepCopyFrom);
}
//assignment operator, deep copy
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(const CPUMatrix<ElemType>& deepCopyFrom)
{
SetValue(deepCopyFrom);
return *this;
}
//move constructor, shallow copy
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(CPUMatrix<ElemType>&& moveFrom)
: Base(/* shallow */ true)
{
ShallowCopyFrom(moveFrom);
moveFrom.ZeroValues();
}
// Shortcut of default constructor + shallow copy, to avoid one initialization
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& shallowCopyFrom, bool shallow)
: Base(shallow)
{
ShallowCopyFrom(shallowCopyFrom);
}
//move assignment operator, shallow copy
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(CPUMatrix<ElemType>&& moveFrom)
{
if (this != &moveFrom)
{
ShallowCopyFrom(moveFrom);
// release the pointer from the source object so that the destructor won't release it twice
moveFrom.ZeroValues();
}
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::Clear()
{
ZeroInit();
}
#pragma endregion Constructors and Destructor
#pragma region Basic Operators
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::ColumnSlice(size_t startColumn, size_t numCols) const
{
if (startColumn + numCols > m_numCols)
InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) m_numCols);
CPUMatrix<ElemType> slice(*this, /* shallow= */ true);
slice.m_numCols = numCols;
slice.m_sliceViewOffset = m_sliceViewOffset + startColumn * m_numRows;
return slice;
}
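// Example: for a 3x4 column-major matrix, ColumnSlice(1, 2) returns a 3x2
// view whose data starts 1 * 3 elements into the buffer; no elements are
// copied, so writes through the slice are visible in the source matrix.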
// set this(:, 0:numCols-1) = fromMatrix(:, startColumn : startColumn+numCols-1)
// TODO: why not say *this = ColumnSlice()?
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
if (startColumn + numCols > fromMatrix.m_numCols)
InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) fromMatrix.m_numCols);
Clear();
ShallowCopyFrom(fromMatrix);
m_numCols = numCols;
m_sliceViewOffset = fromMatrix.m_sliceViewOffset + startColumn * m_numRows;
return *this;
}
// set this(: , startColumn:startColumn+numCols-1)= fromMatrix;
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
if (startColumn + numCols > m_numCols)
LogicError("The slice is out of range of the destination matrix.");
if (numCols > fromMatrix.GetNumCols())
InvalidArgument("The slice (%d) is out of range of the source matrix (%d).", (int) numCols, (int) fromMatrix.GetNumCols());
if (m_numRows != fromMatrix.m_numRows)
LogicError("The number of rows in source and destination matrices do not match");
memcpy(Data() + startColumn * m_numRows, fromMatrix.Data(), numCols * m_numRows * sizeof(ElemType));
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::CopyColumnsStrided(const CPUMatrix<ElemType>& fromMatrix, size_t numCols, size_t srcNumColsStride, size_t destNumColsStride)
{
if ((((numCols - 1) * srcNumColsStride) + 1) > fromMatrix.m_numCols)
LogicError("The numCols to copy and srcNumColsStride specified is out of range of the source matrix.");
if ((((numCols - 1) * destNumColsStride) + 1) > m_numCols)
LogicError("The numCols to copy and srcNumColsStride specified is out of range of the destination matrix.");
if (m_numRows != fromMatrix.m_numRows)
LogicError("The number of rows in source and destination matrices do not match");
long n = (long) numCols, m = (long) m_numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
us(i + 1, j * destNumColsStride) = fromMatrix(i + 1, j * srcNumColsStride);
us(i + 2, j * destNumColsStride) = fromMatrix(i + 2, j * srcNumColsStride);
us(i + 3, j * destNumColsStride) = fromMatrix(i + 3, j * srcNumColsStride);
}
// handle remaining
for (long i = m & ~3; i < m; i++)
{
us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
}
}
}
//for each column of a, we assign all rows of a to this starting from startIndex
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.GetNumRows() != numRows)
LogicError("AssignToRowSliceValuesOf: a.GetNumRows() != numRows.");
if (startIndex + numRows > GetNumRows())
LogicError("AssignToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AssignToRowSliceValuesOf: number of columns does not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (size_t i = 0, startRow = startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(startRow, j) = a(i, j);
us(startRow + 1, j) = a(i + 1, j);
us(startRow + 2, j) = a(i + 2, j);
us(startRow + 3, j) = a(i + 3, j);
}
// handle remaining stuffs
for (size_t i = m & ~3, startRow = startIndex + (m & ~3); i < m; i++, startRow++)
{
us(startRow, j) = a(i, j);
}
}
return *this;
}
//for each column of a, we assign numRows starting from startIndex to this
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (startIndex + numRows > a.GetNumRows())
LogicError("AssignRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows().");
RequireSize(numRows, a.GetNumCols());
long n = (long) a.GetNumCols(); // note: OpenMP requires loop indices to be long, not size_t
long k = (long) a.GetNumRows();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// memory copy might be faster?
memcpy(Data() + j * numRows, a.Data() + j * k + startIndex, sizeof(ElemType) * numRows);
// //four-way unrolling
// for (long i=0, startRow = startIndex; i<(m & ~3); i+=4, startRow+=4)
// {
// us(i,j) = a(startRow,j);
// us(i+1,j) = a(startRow+1,j);
// us(i+2,j) = a(startRow+2,j);
// us(i+3,j) = a(startRow+3,j);
// }
// //handle remaining stuffs
// for (long i=m & ~3, startRow = startIndex+(m & ~3); i<m; i++, startRow++)
// {
// us(i,j) = a(startRow,j);
// }
}
return *this;
}
//for the row slice of this starting from startIndex we add a to it.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.IsEmpty())
LogicError("AddToRowSliceValuesOf: input matrix a is empty.");
if (a.GetNumRows() != numRows)
LogicError("AddToRowSliceValuesOf: a.GetNumRows() != numRows.");
if (startIndex + numRows > GetNumRows())
LogicError("AddToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AddToRowSliceValuesOf: columns does not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(startRow, j) += a(i, j);
us(startRow + 1, j) += a(i + 1, j);
us(startRow + 2, j) += a(i + 2, j);
us(startRow + 3, j) += a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++)
{
us(startRow, j) += a(i, j);
}
}
return *this;
}
//for each column of this, we add row slice of a starting from startIndex
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.IsEmpty())
LogicError("AddWithRowSliceValuesOf: input matrix a is empty.");
if (GetNumRows() != numRows)
LogicError("AddWithRowSliceValuesOf: GetNumRows() != numRows.");
if (startIndex + numRows > a.GetNumRows())
LogicError("AddWithRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AddWithRowSliceValuesOf: columns does not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(i, j) += a(startRow, j);
us(i + 1, j) += a(startRow + 1, j);
us(i + 2, j) += a(startRow + 2, j);
us(i + 3, j) += a(startRow + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++)
{
us(i, j) += a(startRow, j);
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Diagonal() const
{
if (m_numRows != m_numCols)
LogicError("Diagonal can be called only for square matrix. (rows=%d, cols=%d)", (int) m_numRows, (int) m_numCols);
CPUMatrix<ElemType> diag(1, m_numCols);
auto& us = *this;
#pragma omp parallel for
for (long i = 0; i < m_numRows; i++)
{
diag(0, (size_t) i) = us(i, i);
}
return diag;
}
template <class ElemType>
void CPUMatrix<ElemType>::MinusOneAt(CPUMatrix<ElemType>& c, const size_t position)
{
if (position < c.GetNumElements())
c.Data()[position] -= 1.0;
else
RuntimeError("MinusOneAt: position is out of CPU matrix size");
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRepeatOf(const CPUMatrix<ElemType>& a, const size_t numRowRepeats, const size_t numColRepeats)
{
if (this == &a)
LogicError("AssignRepeatOf: a is the same as [this]. Does not support inplace repeat.");
if (a.IsEmpty())
LogicError("AssignRepeatOf: Matrix a is empty.");
RequireSize(a.GetNumRows() * numRowRepeats, a.GetNumCols() * numColRepeats);
long n = (long) a.GetNumCols(), m = (long) a.GetNumRows();
auto& us = *this;
#pragma omp parallel for
for (long q = 0; q < numColRepeats; q++)
{
for (long p = 0; p < numRowRepeats; p++)
{
long colOffset = q * n;
for (long j = 0; j < n; j++, colOffset++)
{
long rowOffset = p * m;
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4, rowOffset += 4)
{
us(rowOffset, colOffset) = a(i, j);
us(rowOffset + 1, colOffset) = a(i + 1, j);
us(rowOffset + 2, colOffset) = a(i + 2, j);
us(rowOffset + 3, colOffset) = a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++, rowOffset++)
{
us(rowOffset, colOffset) = a(i, j);
}
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowRepeatValuesOf(const CPUMatrix<ElemType>& a, const size_t numRepeats)
{
if (a.IsEmpty())
LogicError("AddToRowRepeatValuesOf: input matrix a is empty.");
if (a.GetNumRows() != GetNumRows() * numRepeats)
LogicError("AddToRowRepeatValuesOf: a.GetNumRows() != GetNumRows() * numRepeats.");
long n = (long) a.GetNumCols(), m = (long) GetNumRows();
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
for (long k = 0; k < numRepeats; k++)
{
us(i, j) += a(k * m + i, j);
us(i + 1, j) += a(k * m + i + 1, j);
us(i + 2, j) += a(k * m + i + 2, j);
us(i + 3, j) += a(k * m + i + 3, j);
}
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
for (long k = 0; k < numRepeats; k++)
{
us(i, j) += a(k * m + i, j);
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber)
{
// reference the parameters to silence unused-parameter warnings
a;
posNumber;
negNumber;
shiftNumber;
NOT_IMPLEMENTED;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddFoldedPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber)
{
a;
posNumber;
negNumber;
shiftNumber;
NOT_IMPLEMENTED;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Transpose()
{
if (IsEmpty())
LogicError("Transpose: Matrix is empty.");
CPUMatrix<ElemType> c;
c.AssignTransposeOf(*this);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTransposeOf(const CPUMatrix<ElemType>& a)
{
if (this == &a)
LogicError("AssignTransposeOf: a is the same as [this]. Does not support inplace transpose.");
if (a.IsEmpty())
LogicError("AssignTransposeOf: Matrix a is empty.");
RequireSize(a.GetNumCols(), a.GetNumRows());
long n = (long) a.GetNumCols(), m = (long) a.GetNumRows();
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(j, i) = a(i, j);
us(j, i + 1) = a(i + 1, j);
us(j, i + 2) = a(i + 2, j);
us(j, i + 3) = a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(j, i) = a(i, j);
}
}
return *this;
}
// dst[i] = src[i] * alpha + dst[i] * beta
// scale a column vector and add it to another
// The usual special case: If beta = 0, then dst[] is not read, and may be uninitialized or NaN.
template <class ElemType>
static void ScaleAndAddColumn(ElemType beta, ElemType* dst, const ElemType* src, size_t numRows, ElemType alpha)
{
if (alpha != 1) // rare case: just do the full thing
for (size_t i = 0; i < numRows; i++)
dst[i] = beta * dst[i] + alpha * src[i];
else if (beta == 1) // used in backprop
for (size_t i = 0; i < numRows; i++)
dst[i] += src[i];
else if (beta == 0) // plain assignment
memcpy(dst, src, sizeof(ElemType) * numRows);
else // alpha=1, arbitrary beta: also rare case
for (size_t i = 0; i < numRows; i++)
dst[i] = beta * dst[i] + src[i];
}
// *this[:,j] = a[:,idx[j]] * alpha + *this[:,j] * beta
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoGatherColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha)
{
if (idx.GetNumRows() != 1) // index is 1-dimensional only
InvalidArgument("DoGatherColumnsOf: Map must be a row vector.");
if (beta)
VerifySize(a.GetNumRows(), idx.GetNumCols());
else
Resize(a.GetNumRows(), idx.GetNumCols());
auto& us = *this;
// race-condition consideration: Since this loops over independent output columns, this has no race condition. Cf. DoScatterColumnsOf().
#pragma omp parallel for // TODO: Depending in circumstance, it may be more efficient to parallelize over rows.
foreach_column(jOut, us)
{
auto jInF = idx(0, jOut); // this is the column we need to get
if (std::isnan(jInF) || jInF < 0) // negative index means gap
continue;
size_t jIn = (size_t)jInF;
if (jIn >= a.GetNumCols())
InvalidArgument("DoGatherColumnsOf: Map out of bounds. %ld >= %ld", (long int)jIn, (long int)a.GetNumCols());
ScaleAndAddColumn(beta, &us(0,jOut), &a(0,jIn), us.GetNumRows(), alpha);
}
return *this;
}
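// Example: with idx = [2 0 2], beta = 0 and alpha = 1, the result is the
// column selection [a(:,2) a(:,0) a(:,2)]; a negative (or NaN) index marks a
// gap and leaves the corresponding output column unwritten.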
// *this[:,idx[j]] = a[:,j] * alpha + *this[:,idx[j]] * beta
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoScatterColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha)
{
if (idx.GetNumRows() != 1) // index is 1-dimensional only
InvalidArgument("DoScatterColumnsOf: Map must be a row vector.");
if (idx.GetNumCols() != a.GetNumCols())
InvalidArgument("DoScatterColumnsOf: Map must have width of input vector.");
if (a.GetNumRows() != GetNumRows())
InvalidArgument("DoScatterColumnsOf: Output must have same height as input vector.");
auto& us = *this;
// pre-scale with beta upfront
// Scatter may add more than one source column to the same target, so we must pre-scale with beta, and then just keep adding.
Scale(beta, us); // if beta is 0, then this will be a memset()
ScatterValues(idx.Data(), a.Data(), us.Data(), alpha, idx.GetNumCols(), a.GetNumRows(), GetNumCols(), idx.GetNumRows());
return *this;
}
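// Example: with idx = [1 1] and alpha = beta = 1, both input columns are
// accumulated into output column 1. The upfront Scale(beta, us) is what makes
// beta == 0 behave as plain assignment even when several sources hit the same
// target column.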
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const ElemType v)
{
if (IsEmpty())
LogicError("SetValue: Matrix is empty.");
bool isFinite = std::numeric_limits<ElemType>::is_integer || std::isfinite((double) v);
if (isFinite && v == 0)
{
memset(Data(), 0, sizeof(ElemType) * GetNumElements());
}
else
{
ElemType* bufPtr = Data();
long m = (long) GetNumElements();
// 2-way thread parallelism is sufficient for the memory bound
// operation of just setting the values of an array.
const unsigned SETVALUE_NUM_THREADS = 2;
UNUSED(SETVALUE_NUM_THREADS); // in case OMP is turned off.
#pragma omp parallel for num_threads(SETVALUE_NUM_THREADS)
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
bufPtr[i] = v;
bufPtr[i + 1] = v;
bufPtr[i + 2] = v;
bufPtr[i + 3] = v;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
bufPtr[i] = v;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::MaskColumnsValue(const CPUMatrix<char>& columnsMask, ElemType val, size_t numColsPerMaskEntry)
{
if (GetNumCols() != (columnsMask.GetNumCols() * numColsPerMaskEntry))
RuntimeError("MaskColumnsValue: Matrix number of columns must equal 'column mask number of columns * numColsPerMaskEntry'.");
auto& us = *this;
long n = (long)columnsMask.GetNumCols(), m = (long) GetNumRows();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
if (columnsMask(0, j) == 1)
continue;
for (long k = 0; k < numColsPerMaskEntry; ++k)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, (j * numColsPerMaskEntry) + k) = val;
us(i + 1, (j * numColsPerMaskEntry) + k) = val;
us(i + 2, (j * numColsPerMaskEntry) + k) = val;
us(i + 3, (j * numColsPerMaskEntry) + k) = val;
}
// handle remaining
for (long i = m & ~3; i < m; i++)
{
us(i, (j * numColsPerMaskEntry) + k) = val;
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType* colPointer, size_t j)
{
if (IsEmpty())
LogicError("SetColumn: Matrix is empty.");
if (colPointer == NULL)
return;
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = colPointer[i];
us(i + 1, j) = colPointer[i + 1];
us(i + 2, j) = colPointer[i + 2];
us(i + 3, j) = colPointer[i + 3];
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = colPointer[i];
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType val, size_t j)
{
if (IsEmpty())
LogicError("SetColumn: Matrix is empty.");
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = val;
us(i + 1, j) = val;
us(i + 2, j) = val;
us(i + 3, j) = val;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = val;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const CPUMatrix<ElemType>& valMat, size_t j)
{
if (IsEmpty())
LogicError("SetColumn: Matrix is empty.");
if (valMat.GetNumRows() != GetNumRows() || valMat.GetNumCols() != 1)
LogicError("The valMat matrix has incorrect number of rows or columns.");
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = valMat(i, 0);
us(i + 1, j) = valMat(i + 1, 0);
us(i + 2, j) = valMat(i + 2, 0);
us(i + 3, j) = valMat(i + 3, 0);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = valMat(i, 0);
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUMatrix<ElemType>& deepCopyFrom)
{
if (this == &deepCopyFrom)
return;
SetValue(deepCopyFrom.GetNumRows(), deepCopyFrom.GetNumCols(), deepCopyFrom.Data(), 0);
}
#if 0
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUMatrix<ElemType>& /*deepCopyFrom*/)
{
NOT_IMPLEMENTED;
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUSparseMatrix<ElemType>& deepCopyFrom)
{
deepCopyFrom.AssignColumnSliceToDense(*this, 0, deepCopyFrom.GetNumCols());
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUSparseMatrix<ElemType>& /*deepCopyFrom*/)
{
NOT_IMPLEMENTED;
}
#endif
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags)
{
if (pArray == nullptr && numRows * numCols > 0)
InvalidArgument("Invalid pArray. pArray == nullptr, but matrix is of size %d * %d = %d.", (int)numRows, (int)numCols, (int)(numRows * numCols));
SetFormat(matrixFormatDense);
SetComputeDeviceId(CPUDEVICE);
// if it's externally managed, then populate the structure
if (matrixFlags & matrixFlagDontOwnBuffer)
{
// free previous array allocation if any before overwriting
delete[] Buffer();
m_numRows = numRows;
m_numCols = numCols;
SetBuffer(pArray, GetNumElements() * sizeof(ElemType), true);
SetSizeAllocated(GetNumElements());
}
else
{
RequireSize(numRows, numCols);
if (!IsEmpty())
{
if (!(matrixFlags & matrixFormatRowMajor)) // compatible with the internal column-major layout
memcpy(Data(), pArray, GetNumElements() * sizeof(ElemType));
else // need to transpose
{
ElemType* bufPtr = Data();
auto& us = *this;
if (std::is_same<ElemType, double>::value)
{
#pragma omp parallel for
foreach_column (j, us)
{
cblas_dcopy((int) numRows, reinterpret_cast<double*>(pArray + j), (int) numCols, reinterpret_cast<double*>(bufPtr + LocateColumn(j)), 1);
}
}
else if (std::is_same<ElemType, float>::value)
{
#pragma omp parallel for
foreach_column (j, us)
{
{
#pragma warning(suppress : 4244)
cblas_scopy((int) numRows, reinterpret_cast<float*>(pArray + j), (int) numCols, reinterpret_cast<float*>(bufPtr + LocateColumn(j)), 1);
}
}
}
else
{
RuntimeError("Unsupported data format");
}
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const ElemType v)
{
if (GetNumRows() != GetNumCols())
LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, i) = v;
us(i + 1, i + 1) = v;
us(i + 2, i + 2) = v;
us(i + 3, i + 3) = v;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, i) = v;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const CPUMatrix<ElemType>& vector)
{
if (IsEmpty() || vector.IsEmpty())
LogicError("SetDiagonalValue: Matrix is empty.");
if (GetNumRows() != GetNumCols())
LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");
if (vector.GetNumRows() != 1 && vector.GetNumCols() != 1)
LogicError("SetDiagonalValue: input vector must be a vector.");
if (vector.GetNumElements() == 1) // reduce to simple form
SetDiagonalValue(vector(0, 0));
else if (vector.GetNumRows() != GetNumRows() && vector.GetNumCols() != GetNumRows())
LogicError("SetDiagonalValue: input vector's dimension does not agree with [this].");
else
{
auto& us = *this;
long m = (long) GetNumRows();
if (vector.GetNumRows() == 1) // row vector
{
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, i) = vector(0, i);
us(i + 1, i + 1) = vector(0, i + 1);
us(i + 2, i + 2) = vector(0, i + 2);
us(i + 3, i + 3) = vector(0, i + 3);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, i) = vector(0, i);
}
}
else
{
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, i) = vector(i, 0);
us(i + 1, i + 1) = vector(i + 1, 0);
us(i + 2, i + 2) = vector(i + 2, 0);
us(i + 3, i + 3) = vector(i + 3, 0);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, i) = vector(i, 0);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomValue(const ElemType low, const ElemType high, unsigned long seed)
{
if (IsEmpty())
LogicError("SetUniformRandomValue: Matrix is empty.");
std::mt19937_64 generator;
generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
boost::random::uniform_real_distribution<double> r((double)low, (double)high);
ElemType* bufPtr = Data();
long m = (long) GetNumElements();
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
bufPtr[i] = (ElemType)r(generator);
bufPtr[i + 1] = (ElemType)r(generator);
bufPtr[i + 2] = (ElemType)r(generator);
bufPtr[i + 3] = (ElemType)r(generator);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
bufPtr[i] = (ElemType)r(generator);
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomValue(RNGHandle& rngHandle, const ElemType low, const ElemType high)
{
if (IsEmpty())
LogicError("SetUniformRandomValue: Matrix is empty.");
CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
if (cpuRNGHandle == nullptr)
LogicError("rngHandle must be a CPURNGHandle.");
boost::random::uniform_real_distribution<double> r((double)low, (double)high);
std::generate(Data(), Data() + GetNumElements(), [&cpuRNGHandle, &r]() {return (ElemType)r(cpuRNGHandle->Generator()); });
}
template <class ElemType>
void CPUMatrix<ElemType>::SetGaussianRandomValue(RNGHandle& rngHandle, const ElemType mean, const ElemType stdev)
{
if (IsEmpty())
LogicError("SetGaussianRandomValue: Matrix is empty.");
CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
if (cpuRNGHandle == nullptr)
LogicError("rngHandle must be a CPURNGHandle.");
boost::random::normal_distribution<double> r((double)mean, (double)stdev);
auto n = AsMultipleOf(GetNumElements(), 2);
std::generate(Data(), Data() + n, [&cpuRNGHandle, &r]() {return (ElemType)r(cpuRNGHandle->Generator()); });
}
template <class ElemType>
void CPUMatrix<ElemType>::SetGumbelRandomValue(RNGHandle& rngHandle, const ElemType loc, const ElemType scale)
{
if (IsEmpty())
LogicError("SetGumbelRandomValue: Matrix is empty.");
CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
if (cpuRNGHandle == nullptr)
LogicError("rngHandle must be a CPURNGHandle.");
boost::random::uniform_real_distribution<double> r(0, 1);
std::generate(Data(), Data() + GetNumElements(), [&cpuRNGHandle, &r, loc, scale]() {return (ElemType)(loc - scale * log(-log1p(-r(cpuRNGHandle->Generator())))); });
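// (Inverse-CDF sampling: for u ~ U(0,1), loc - scale * log(-log(1 - u)) is
// Gumbel(loc, scale); log1p(-u) is used instead of log(1 - u) for accuracy
// when u is small.)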
}
template <class ElemType>
void CPUMatrix<ElemType>::SetGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
if (sigma <= 0)
InvalidArgument("SetGaussianRandomValue: sigma must be a positive value.");
if (IsEmpty())
LogicError("SetGaussianRandomValue: Matrix is empty.");
auto& us = *this;
std::mt19937_64 generator(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
boost::random::normal_distribution<double> r((double)mean, (double)sigma);
// no #pragma omp parallel for here: the shared generator is not thread-safe, and the results would not be deterministic
foreach_coord (i, j, us)
{
us(i, j) = (ElemType)r(generator);
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetTruncatedNormalRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
if (sigma <= 0)
InvalidArgument("SetTruncatedNormalRandomValue: sigma must be a positive value.");
if (IsEmpty())
LogicError("SetTruncatedNormalRandomValue: Matrix is empty.");
auto& us = *this;
std::mt19937_64 generator(seed == USE_TIME_BASED_SEED ? (unsigned long)time(NULL) : seed);
boost::random::normal_distribution<double> r((double)mean, (double)sigma);
const ElemType high = mean + 2 * sigma;
const ElemType low = mean - 2 * sigma;
// no #pragma omp parallel for here: the shared generator is not thread-safe, and the results would not be deterministic
foreach_coord(i, j, us)
{
ElemType tmp = 0;
do
tmp = (ElemType)r(generator);
while (tmp < low || tmp > high ); // Rejection sampling is fine here because the acceptance probability is about 0.9545
us(i, j) = tmp;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::AddGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
if (sigma <= 0)
InvalidArgument("AddGaussianRandomValue: sigma must be a positive value.");
if (IsEmpty())
LogicError("AddGaussianRandomValue: Matrix is empty.");
auto& us = *this;
std::mt19937_64 generator;
generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
boost::random::normal_distribution<double> r((double)mean, (double)sigma);
long m = (long) GetNumRows(), n = (long) GetNumCols();
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = (ElemType)r(generator);
us(i + 1, j) = (ElemType)r(generator);
us(i + 2, j) = (ElemType)r(generator);
us(i + 3, j) = (ElemType)r(generator);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = (ElemType)r(generator);
}
}
}
//maskRate: fraction of values masked out (similar to dropout rate)
//scaleValue: the scale applied to the remaining (unmasked) values
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomMask(const ElemType maskRate, const ElemType scaleValue, RNGHandle& rngHandle)
{
if (IsEmpty())
LogicError("SetUniformRandomValue: Matrix is empty.");
CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
if (cpuRNGHandle == nullptr)
LogicError("rngHandle must be a CPURNGHandle.");
auto& us = *this;
boost::random::uniform_real_distribution<double> r(0, 1);
long m = (long) GetNumRows(), n = (long) GetNumCols();
ElemType v;
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
v = (ElemType)r(cpuRNGHandle->Generator());
us(i, j) = v <= maskRate ? (ElemType)0 : scaleValue;
v = (ElemType)r(cpuRNGHandle->Generator());
us(i + 1, j) = v <= maskRate ? (ElemType)0 : scaleValue;
v = (ElemType)r(cpuRNGHandle->Generator());
us(i + 2, j) = v <= maskRate ? (ElemType)0 : scaleValue;
v = (ElemType)r(cpuRNGHandle->Generator());
us(i + 3, j) = v <= maskRate ? (ElemType)0 : scaleValue;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
v = (ElemType)r(cpuRNGHandle->Generator());
us(i, j) = v <= maskRate ? (ElemType)0 : scaleValue;
}
}
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::Adagrad(CPUMatrix<ElemType>& gradients, const bool needAveMultiplier)
{
ElemType aveMultiplier = 0;
if (IsEmpty() || gradients.GetNumCols() != GetNumCols() || gradients.GetNumRows() != GetNumRows())
{
RequireSize(gradients.GetNumRows(), gradients.GetNumCols());
SetValue(0.0);
}
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols())
LogicError("The matrix gradients must have the same rows and columns as this matrix.");
ElemType *a = Data(), *d_v = gradients.Data();
size_t n = GetNumElements();
const ElemType floor = 1e-16f;
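// Per-element Adagrad update: accumulate a[i] += g[i]^2, then scale the
// gradient by 1 / sqrt(a[i] + floor); 'floor' guards the division for
// elements whose accumulated squared gradient is still zero.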
ElemType a0, a1, a2, a3;
// disable omp here because aveMultiplier needs to be added atomically. however, it seems the result is incorrect even if omp atomic and omp critical are used.
// #pragma omp parallel for
for (long i = 0; i < (n & ~3); i += 4) // four-way unrolling
{
a[i] += d_v[i] * d_v[i];
a[i + 1] += d_v[i + 1] * d_v[i + 1];
a[i + 2] += d_v[i + 2] * d_v[i + 2];
a[i + 3] += d_v[i + 3] * d_v[i + 3];
a0 = sqrt(a[i] + floor);
a1 = sqrt(a[i + 1] + floor);
a2 = sqrt(a[i + 2] + floor);
a3 = sqrt(a[i + 3] + floor);
d_v[i] /= a0;
d_v[i + 1] /= a1;
d_v[i + 2] /= a2;
d_v[i + 3] /= a3;
if (needAveMultiplier)
{
aveMultiplier += 1 / a0 + 1 / a1 + 1 / a2 + 1 / a3;
}
}
// get the last few elements if any
for (long i = n & ~3; i < n; i++)
{
a[i] += d_v[i] * d_v[i];
a0 = sqrt(a[i] + floor);
d_v[i] /= a0;
if (needAveMultiplier)
{
aveMultiplier += 1 / a0;
}
}
if (needAveMultiplier && n > 0)
return aveMultiplier / n;
else
return 1;
}
template <class ElemType>
void CPUMatrix<ElemType>::FSAdagrad(CPUMatrix<ElemType>& gradients,
CPUMatrix<ElemType>& functionValues,
ElemType learnRatePerSample,
ElemType momentum,
ElemType adaWeight,
ElemType adaMul,
ElemType unitGainFactor)
{
size_t numColsNeeded = 2 * gradients.GetNumCols();
if (IsEmpty() || (GetNumCols() < numColsNeeded))
{
RequireSize(gradients.GetNumRows(), numColsNeeded);
SetValue(0.0);
}
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
LogicError("The matrix gradients does not have expected dimensions.");
size_t n = gradients.GetNumElements();
ElemType* grad = gradients.Data();
ElemType* smoothAda = Data();
ElemType* smoothMom = Data() + n;
ElemType* val = functionValues.Data();
#pragma omp parallel for
// TODO: Unroll 4-times for better performance leveraging vectorization
for (long i = 0; i < n; i++)
{
ElemType g = grad[i];
ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
smoothAda[i] = adaSqr;
if (adaSqr != 0.0f)
{
ElemType ada = sqrt(adaSqr);
ElemType w = adaMul * ((ElemType) 1.0 / ada);
if (w > 10.0f)
w = 10.0f;
g *= w;
}
if (momentum > 0.0f)
{
g = momentum * smoothMom[i] + unitGainFactor * g;
smoothMom[i] = g;
}
g *= learnRatePerSample;
val[i] -= g;
}
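// Note on the loop above: unlike plain Adagrad's running sum, FSAdagrad keeps
// an exponentially smoothed squared-gradient average (weight adaWeight),
// clips the adaptive scale w at 10, and folds in ordinary momentum with a
// unitGainFactor blend; adaMul is an externally supplied multiplier
// (presumably carrying the bias-correction term computed by the caller).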
}
template <class ElemType>
void CPUMatrix<ElemType>::Adam(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learnRatePerSample,
ElemType momentum, ElemType adaWeight, ElemType adaMul, ElemType epsilon, ElemType unitGainFactor, bool adamax)
{
size_t numColsNeeded = 2 * gradients.GetNumCols();
if (IsEmpty() || (GetNumCols() < numColsNeeded))
{
RequireSize(gradients.GetNumRows(), numColsNeeded);
SetValue(0.0);
}
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
LogicError("The matrix gradients does not have expected dimensions.");
size_t n = gradients.GetNumElements();
ElemType* grad = gradients.Data();
ElemType* smoothAda = Data();
ElemType* smoothMom = Data() + n;
ElemType* val = functionValues.Data();
#pragma omp parallel for
// TODO: Unroll 4-times for better performance leveraging vectorization
for (long i = 0; i < n; i++)
{
ElemType g = grad[i];
ElemType ada;
if (!adamax)
{
ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
smoothAda[i] = adaSqr;
ada = sqrt(adaSqr);
}
else
ada = smoothAda[i] = std::max(adaWeight * smoothAda[i], fabs_(g));
ElemType w = adaMul * (ElemType)( 1.0 / (ada + epsilon));
g = momentum * smoothMom[i] + unitGainFactor * g;
smoothMom[i] = g;
val[i] -= g * w * learnRatePerSample;
}
}
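// Note on the loop above: smoothAda and smoothMom are the second- and
// first-moment estimates of Adam; any bias correction is expected to arrive
// through adaMul. With adamax set, the second moment is replaced by an
// exponentially weighted infinity norm, max(adaWeight * v, |g|), i.e. the
// AdaMax variant.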
template <class ElemType>
ElemType CPUMatrix<ElemType>::RmsProp(CPUMatrix<ElemType>& gradients,
ElemType RMS_GAMMA,
ElemType RMS_WGT_INC,
ElemType RMS_WGT_MAX,
ElemType RMS_WGT_DEC,
ElemType RMS_WGT_MIN,
const bool needAveMultiplier,
const bool initialized)
{
const ElemType floor = 1e-6f;
size_t n = gradients.GetNumElements();
ElemType* curr_grad = gradients.Data();
if (IsEmpty() || GetNumCols() < gradients.GetNumCols() * 3 || !initialized)
{
RequireSize(gradients.GetNumRows(), gradients.GetNumCols() * 3);
SetValue(0.0);
ElemType* avars = Data(); // accumulated variances for RMS scaling
ElemType* steps = Data() + 2 * n; // current step size
// initialize moving average of gradient-squared
for (long i = 0; i < n; i++)
avars[i] = curr_grad[i] * curr_grad[i];
// initialize starting step size
for (long i = 0; i < n; i++)
steps[i] = ElemType(0.02);
}
ElemType* avars = Data(); // accumulated variances for RMS scaling
ElemType* signs = Data() + n; // sign of previous gradient
ElemType* steps = Data() + 2 * n; // current step size
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols() * 3)
LogicError("The matrix gradients does not have expected dimensions.");
ElemType ONE_MINUS_GAMMA = ElemType(1.0) - RMS_GAMMA;
// int upd[] = {
// 2,2,0,
// 2,2,0,
// 1,1,1,
// 2,2,0,
// 1,2,1,
// 0,2,2,
// 1,1,1,
// 0,2,2,
// 0,2,2,
// };
// for (long i=0; i<n; i++)
// {
// avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
// // grad sign base 3: 0->neg, 1->zero, 2->pos
// const int grad_sign = 1 + (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0));
// // signs[i] contains three consecutive grad_sign
// signs[i] = 3*(int(signs[i]) % 9) + grad_sign;
// switch(upd[int(signs[i])])
// {
// case 0:
// steps[i] = max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);
// break;
// case 2:
// steps[i] = min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
// break;
// }
// curr_grad[i] *= steps[i] / sqrt(avars[i] + floor);
// }
ElemType aveMultiplier = 0, a;
for (long i = 0; i < n; i++)
{
avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
const int grad_sign = (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0));
if (signs[i] * grad_sign > 0)
steps[i] = std::min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
else
steps[i] = std::max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);
a = steps[i] / sqrt(avars[i] + floor);
curr_grad[i] *= a;
signs[i] = (ElemType) grad_sign;
if (needAveMultiplier)
aveMultiplier += a;
}
if (needAveMultiplier)
return aveMultiplier / n;
else
return 1;
}
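// Note on the loop above: this RmsProp variant combines Rprop-style step
// adaptation (grow the per-weight step by RMS_WGT_INC while the gradient sign
// persists, shrink it by RMS_WGT_DEC on a sign flip) with RMSProp
// normalization by sqrt(avars + floor).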
template <class ElemType>
template <typename GradType>
void CPUMatrix<ElemType>::AdaDelta(CPUMatrix<GradType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learningRate, ElemType rho, ElemType epsilon)
{
size_t numColsNeeded = 2 * gradients.GetNumCols();
if (IsEmpty() || (GetNumCols() < numColsNeeded))
{
RequireSize(gradients.GetNumRows(), numColsNeeded);
SetValue(0.0);
}
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
LogicError("The matrix gradients does not have expected dimensions.");
size_t n = gradients.GetNumElements();
GradType* grad = gradients.Data();
ElemType* smoothAda = Data();
ElemType* smoothX2 = Data() + n;
ElemType* val = functionValues.Data();
#pragma omp parallel for
// TODO: Unroll 4-times for better performance leveraging vectorization
for (long i = 0; i < n; i++)
{
ElemType g = (ElemType)grad[i];
ElemType adaSqr = rho * smoothAda[i] + (1 - rho) * g * g;
smoothAda[i] = adaSqr;
ElemType x2 = smoothX2[i];
ElemType deltaX = -sqrt(x2 + epsilon) / sqrt(adaSqr + epsilon) * g;
smoothX2[i] = rho * smoothX2[i] + (1 - rho) * deltaX * deltaX;
val[i] += learningRate * deltaX;
}
}
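// The loop above is the standard AdaDelta recurrence (Zeiler, 2012):
//   E[g^2]  <- rho * E[g^2]  + (1 - rho) * g^2
//   dx      =  -sqrt(E[dx^2] + eps) / sqrt(E[g^2] + eps) * g
//   E[dx^2] <- rho * E[dx^2] + (1 - rho) * dx^2
// with smoothAda = E[g^2] and smoothX2 = E[dx^2] stored side by side
// (hence numColsNeeded = 2 * gradients.GetNumCols()).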
template <class ElemType>
void CPUMatrix<ElemType>::AdaDeltaFlushTimestamps(size_t cols, ElemType rho, int* timestamps, int currentTimestamp)
{
// Sets all timestamps to 0 and updates the two logical buffers that this object holds
// so that their values are the same as if a dense implementation of adadelta had been used.
// This basically means that the values of these buffers are set to decay * original value
// where decay is rho ** (currentTimestamp - timestamp for that column)
auto rows = GetNumRows();
auto smoothAda = Data();
auto smoothX2 = Data() + cols * rows;
#pragma omp parallel for
for (auto col = 0; col < cols; ++col)
{
ElemType decay = std::pow(rho, ElemType(currentTimestamp - timestamps[col]));
auto offset = rows * col;
timestamps[col] = 0;
for (auto row = 0; row < rows; ++row)
{
smoothAda[offset + row] *= decay;
smoothX2[offset + row] *= decay;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::Reshape(const size_t numRows, const size_t numCols)
{
if (numRows * numCols != GetNumElements())
InvalidArgument("Reshape: Total number of elements does not match.");
m_numRows = numRows;
m_numCols = numCols;
}
// RequireSize() -- Tests if the matrix is the right size. If not, resizes the matrix. This avoids the VerifyResizable check if we're already the right size.
template <class ElemType>
void CPUMatrix<ElemType>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
if (GetNumRows() != numRows || GetNumCols() != numCols)
Resize(numRows, numCols, growOnly);
}
// Resize() -- change matrix size
// This function is cheap if the matrix size does not change.
// Current content is not preserved.
// If growOnly is true, resize will not reallocate memory if the current memory is large enough (i.e., will not shrink).
// If this object does not own its memory then new memory cannot be allocated (one can still shrink and/or reshape).
template <class ElemType>
void CPUMatrix<ElemType>::Resize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
if (GetNumRows() == numRows && GetNumCols() == numCols)
return;
VerifyResizable(__func__);
size_t numElements = numRows * numCols;
if (numElements > GetSizeAllocated() || // grow allocation
(!growOnly && (numElements != GetSizeAllocated()))) // shrink allocation (not if 'growOnly')
{
// reallocate buffer
ElemType* pArray = nullptr;
if (numElements > 0)
{
pArray = NewArray<ElemType>(numElements);
}
// success: update the object
delete[] Buffer();
SetBuffer(pArray, numElements * sizeof(ElemType));
SetSizeAllocated(numElements);
}
// success
m_sliceViewOffset = 0;
m_numRows = numRows;
m_numCols = numCols;
}
// allocated by the callee but should be deleted by the caller
// TODO: change to use STL vector instead
template <class ElemType>
ElemType* CPUMatrix<ElemType>::CopyToArray() const
{
size_t numElements = GetNumElements();
if (numElements != 0)
{
ElemType* arrayCopyTo = NewArray<ElemType>(numElements);
memcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements);
return arrayCopyTo;
}
else
{
return nullptr;
}
}
//memory will be allocated by the callee if not enough but need to be deleted by the caller after it's done
//return number of elements copied
template <class ElemType>
size_t CPUMatrix<ElemType>::CopyToArray(ElemType*& arrayCopyTo, size_t& currentArraySize) const
{
size_t numElements = GetNumElements();
if (numElements > currentArraySize)
{
delete[] arrayCopyTo;
arrayCopyTo = NewArray<ElemType>(numElements);
currentArraySize = numElements;
}
if (numElements != 0)
{
memcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements);
}
return numElements;
}
template <typename ElemType>
void CPUMatrix<ElemType>::CopySection(size_t /*numRows*/, size_t /*numCols*/, ElemType* /*dst*/, size_t /*colStride*/) const
{
// REVIEW alexeyk: currently not used by CPU, but implement when possible.
RuntimeError("Not implemented.");
}
template <class ElemType>
inline size_t CPUMatrix<ElemType>::LocateColumn(const size_t col) const
{
// For performance reason avoid extra validation in release.
assert(col == 0 || col < GetNumCols());
return col * m_numRows; // matrix in column-wise storage
}
template <class ElemType>
inline size_t CPUMatrix<ElemType>::LocateElement(const size_t row, const size_t col) const
{
// For performance reason avoid extra validation in release.
assert(row < m_numRows);
return LocateColumn(col) + row; // matrix in column-wise storage
}
#pragma endregion Basic Operators
#pragma region Member BLAS Functions
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(ElemType alpha)
{
return AssignSumOf(alpha, *this);
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
c.AssignSumOf(alpha, *this);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSumOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = alpha + a(i, j);
us(i + 1, j) = alpha + a(i + 1, j);
us(i + 2, j) = alpha + a(i + 2, j);
us(i + 3, j) = alpha + a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = alpha + a(i, j);
}
}
return *this;
}
//if [this] and a have same dimension then [this]=[this]+a
//if a is a column vector, add to all columns of [this]
//if a is a row vector, add to all rows of [this]
//if a is a scalar, add it to all elements.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(const CPUMatrix<ElemType>& a)
{
// if (a.GetNumElements() == 1)
// *this += a(0,0);
// else
ScaleAndAdd(1, a, *this);
return *this;
}
//if [this] and a have same dimension then OUTPUT=[this]+a
//if a is a column vector, add to all columns of [this]
//if a is a row vector, add to all rows of [this]
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(const CPUMatrix<ElemType>& a) const
{
if (GetNumElements() == 1)
{
CPUMatrix<ElemType> c(a);
c += (*this)(0, 0);
return c;
}
else if (a.GetNumElements() == 1)
{
CPUMatrix<ElemType> c(*this);
c += a(0, 0);
return c;
}
else
{
CPUMatrix<ElemType> c(*this); // this implementation introduces a copy overhead, but reuses existing code
c += a;
return c;
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.GetNumElements() == 1)
{
SetValue(b);
(*this) += a;
}
else
{
SetValue(a);
(*this) += b;
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(ElemType alpha)
{
return AssignDifferenceOf(*this, alpha);
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
c.AssignDifferenceOf(*this, alpha);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = alpha - a(i, j);
us(i + 1, j) = alpha - a(i + 1, j);
us(i + 2, j) = alpha - a(i + 2, j);
us(i + 3, j) = alpha - a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = alpha - a(i, j);
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const ElemType alpha)
{
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = a(i, j) - alpha;
us(i + 1, j) = a(i + 1, j) - alpha;
us(i + 2, j) = a(i + 2, j) - alpha;
us(i + 3, j) = a(i + 3, j) - alpha;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = a(i, j) - alpha;
}
}
return *this;
}
//if [this] and a have same dimension then [this]=[this]-a
//if a is a column vector, minus it from all columns of [this]
//if a is a row vector, minus it from all rows of [this]
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(const CPUMatrix<ElemType>& a)
{
ScaleAndAdd(-1, a, *this);
return *this;
}
//if [this] and a have same dimension then output=[this]-a
//if a is a column vector, minus it from all columns of [this]
//if a is a row vector, minus it from all rows of [this]
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(const CPUMatrix<ElemType>& a) const
{
CPUMatrix<ElemType> c(*this); // this implementation introduces a copy overhead, but reuses existing code
c -= a;
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (this != &a)
{
RequireSize(a.GetNumRows(), a.GetNumCols());
SetValue(a);
}
(*this) -= b;
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator*=(ElemType alpha)
{
Scale(alpha, *this);
return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
Scale(alpha, *this, c);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
Scale(alpha, a, *this);
return *this;
}
// [this]=a*b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB)
{
if (a.GetNumElements() == 1)
{
if (transposeB)
AssignTransposeOf(b);
(*this) *= a(0, 0);
}
else if (b.GetNumElements() == 1)
{
if (transposeA)
AssignTransposeOf(a);
(*this) *= b(0, 0);
}
else
Multiply(a, transposeA, b, transposeB, *this);
return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(const CPUMatrix<ElemType>& a) const
{
auto& us = *this;
if (GetNumElements() == 1)
{
CPUMatrix<ElemType> c;
c.AssignProductOf(us(0, 0), a);
return c;
}
else if (a.GetNumElements() == 1)
{
CPUMatrix<ElemType> c;
c.AssignProductOf(a(0, 0), us);
return c;
}
else
{
CPUMatrix<ElemType> c;
Multiply(*this, a, c);
return c;
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator/=(ElemType alpha)
{
(*this) *= 1 / alpha;
return (*this);
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator/(ElemType alpha) const
{
return ((*this) * (1 / alpha));
}
//element-wise power
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator^=(ElemType alpha)
{
auto& us = *this;
ElementWisePower(alpha, us, us);
return us;
}
//element-wise power
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator^(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
ElementWisePower(alpha, *this, c);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementPowerOf(const CPUMatrix<ElemType>& a, const ElemType power)
{
ElementWisePower(power, a, *this);
return *this;
}
//[this]=[this] .* a (we cannot overload operator .* in c++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
return AssignElementProductOf(*this, a);
}
//[this]=[this] ./ a (we cannot overload operator ./ in c++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementDivideBy(const CPUMatrix<ElemType>& a)
{
return AssignElementDivisionOf(*this, a);
}
//[this]=a .* b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignElementProductOf: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AssignElementProductOf: The input matrix dimensions do not match.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = a(i, j) * b(i, j);
us(i + 1, j) = a(i + 1, j) * b(i + 1, j);
us(i + 2, j) = a(i + 2, j) * b(i + 2, j);
us(i + 3, j) = a(i + 3, j) * b(i + 3, j);
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) = a(i, j) * b(i, j);
}
}
return *this;
}
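// Illustrative sketch of the Hadamard product above (hypothetical values):
//   CPUMatrix<float> a(2, 2), b(2, 2), c;
//   a.SetValue(2.0f);
//   b.SetValue(3.0f);
//   c.AssignElementProductOf(a, b); // c(i, j) == 6 for every (i, j)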
//[this] +=a .* b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AddElementProductOf: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AddElementProductOf : The input matrix dimensions do not match.");
if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == GetNumCols()))
InvalidArgument("AddElementProductOf : The input matrix dimensions do not match [this].");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) += a(i, j) * b(i, j);
us(i + 1, j) += a(i + 1, j) * b(i + 1, j);
us(i + 2, j) += a(i + 2, j) * b(i + 2, j);
us(i + 3, j) += a(i + 3, j) * b(i + 3, j);
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) += a(i, j) * b(i, j);
}
}
return *this;
}
//[this]=a ./ b
// TODO: This clips the divisor by a small value. Is that really what one would want?
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementDivisionOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignElementDivisionOf: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AssignElementDivisionOf : The input matrix dimensions do not match.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
foreach_coord (i, j, us)
{
ElemType v = b(i, j);
if (v >= 0 && v < smallValue)
us(i, j) = a(i, j) / smallValue;
else if (v < 0 && v > -smallValue)
us(i, j) = a(i, j) / (-smallValue);
else
us(i, j) = a(i, j) / v;
}
return *this;
}
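// Worked example of the divisor clipping above: assuming EPS_IN_INVERSE is a
// small positive constant such as 1e-8 (it is defined elsewhere in this file),
// a divisor b(i, j) = 1e-12 is treated as 1e-8 and b(i, j) = -1e-12 as -1e-8,
// so the quotient magnitude is capped at |a(i, j)| * 1e8 while the divisor's
// sign is preserved.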
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("ColumnElementMultiplyWith: Matrix is empty.");
if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
InvalidArgument("ColumnElementMultiplyWith: The input matrix should be a col vector and match [this]'s rows.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) *= a(i, 0);
us(i + 1, j) *= a(i + 1, 0);
us(i + 2, j) *= a(i + 2, 0);
us(i + 3, j) *= a(i + 3, 0);
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) *= a(i, 0);
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("RowElementMultiplyWith: Matrix is empty.");
if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
InvalidArgument("RowElementMultiplyWith: The input matrix should be a row vector and match [this]'s columns.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
ElemType v = a(0, j);
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) *= v;
us(i + 1, j) *= v;
us(i + 2, j) *= v;
us(i + 3, j) *= v;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) *= v;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementDivideBy(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("RowElementDivideBy: Matrix is empty.");
if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
InvalidArgument("RowElementDivideBy: The input matrix should be a row vector and match [this]'s columns.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
ElemType v = a(0, j);
if (v >= 0 && v < EPS_IN_INVERSE)
v = EPS_IN_INVERSE;
else if (v < 0 && v > -EPS_IN_INVERSE)
v = (-EPS_IN_INVERSE);
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) /= v;
us(i + 1, j) /= v;
us(i + 2, j) /= v;
us(i + 3, j) /= v;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) /= v;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementDivideBy(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("ColumnElementDivideBy: Matrix is empty.");
if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
InvalidArgument("ColumnElementDivideBy: The input matrix should be a col vector and match [this]'s rows.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
for (long i = 0; i < m; i++)
{
ElemType v = a(i, 0);
if (v >= 0 && v < smallValue)
us(i, j) /= smallValue;
else if (v < 0 && v > -smallValue)
us(i, j) /= (-smallValue);
else
us(i, j) /= v;
}
}
return *this;
}
//[this]=1 ./ [this]
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementInverse()
{
return AssignElementInverseOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementInverseOf(const CPUMatrix<ElemType>& a)
{
ElemType smallValue = EPS_IN_INVERSE;
if (a.IsEmpty())
LogicError("AssignElementInverseOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (a(i, j) < 0 && a(i, j) > -smallValue)
us(i, j) = 1 / (-smallValue);
else if (a(i, j) >= 0 && a(i, j) < smallValue)
us(i, j) = 1 / smallValue;
else
us(i, j) = 1 / a(i, j);
}
return *this;
}
//[this]=sigmoid([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoid()
{
return AssignSigmoidOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSigmoidOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (a(i, j) >= 0)
us(i, j) = 1 / (1 + exp(-a(i, j)));
else
{
ElemType v = exp(a(i, j));
us(i, j) = v / (1 + v);
}
}
return *this;
}
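// Note on the branch above: both arms compute the same sigmoid, but each keeps
// the argument of exp() non-positive so it cannot overflow. For example, at
// a(i, j) = -1000 the first form would evaluate exp(1000) and overflow, while
// the second form computes v = exp(-1000), which underflows harmlessly to 0
// and yields 0 / (1 + 0) = 0.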
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLinearRectifierDerivative()
{
return AssignLinearRectifierDerivativeOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLinearRectifierDerivativeOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignLinearRectifierDerivativeOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = a(i, j) > 0.0f ? 1.0f : 0.0f;
us(i + 1, j) = a(i + 1, j) > 0.0f ? 1.0f : 0.0f;
us(i + 2, j) = a(i + 2, j) > 0.0f ? 1.0f : 0.0f;
us(i + 3, j) = a(i + 3, j) > 0.0f ? 1.0f : 0.0f;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) = a(i, j) > 0.0f ? 1.0f : 0.0f;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoidDerivative()
{
return AssignSigmoidDerivativeOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidDerivativeOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSigmoidDerivativeOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
ElemType v = a(i, j);
us(i, j) = v * (1 - v);
ElemType v1 = a(i + 1, j);
us(i + 1, j) = v1 * (1 - v1);
ElemType v2 = a(i + 2, j);
us(i + 2, j) = v2 * (1 - v2);
ElemType v3 = a(i + 3, j);
us(i + 3, j) = v3 * (1 - v3);
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
ElemType v = a(i, j);
us(i, j) = v * (1 - v);
}
}
return *this;
}
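// Note: v * (1 - v) above assumes a already holds sigmoid outputs, since
// sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)). Illustrative sketch
// (hypothetical values):
//   CPUMatrix<float> x(2, 2), d;
//   x.SetValue(0.0f);
//   x.InplaceSigmoid();             // every element becomes 0.5
//   d.AssignSigmoidDerivativeOf(x); // every element becomes 0.25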
//[this]=tanh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTanh()
{
return AssignTanhOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTanhOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignTanhOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = tanh(a(i, j));
us(i + 1, j) = tanh(a(i + 1, j));
us(i + 2, j) = tanh(a(i + 2, j));
us(i + 3, j) = tanh(a(i + 3, j));
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) = tanh(a(i, j));
}
}
return *this;
}
//[this]=atanh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAtanh()
{
return AssignAtanhOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAtanhOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignAtanhOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = atanh(a(i, j));
us(i + 1, j) = atanh(a(i + 1, j));
us(i + 2, j) = atanh(a(i + 2, j));
us(i + 3, j) = atanh(a(i + 3, j));
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) = atanh(a(i, j));
}
}
return *this;
}
//[this]=logsoftmax([this]) (column-wise or row-wise, per isColWise)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLogSoftmax(const bool isColWise)
{
return AssignLogSoftmaxOf(*this, isColWise);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogSoftmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
if (a.IsEmpty())
LogicError("AssignLogSoftmaxOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
if (isColWise)
{
#pragma omp parallel for
foreach_column (j, a)
{
// we need to extract max before applying exp to avoid overflow
ElemType maxV = a(0, j);
foreach_row (i, a)
maxV = std::max(maxV, a(i, j));
ElemType sum = 0;
foreach_row (i, a)
sum += exp(us(i, j) = a(i, j) - maxV);
sum = log(sum);
foreach_row (i, us)
us(i, j) -= sum;
}
}
else
{
#pragma omp parallel for
foreach_row (i, a)
{
// we need to extract max before applying exp to avoid overflow
ElemType maxV = a(i, 0);
foreach_column (j, a)
maxV = std::max(maxV, a(i, j));
ElemType sum = 0;
foreach_column (j, a)
sum += exp(us(i, j) = a(i, j) - maxV);
sum = log(sum);
foreach_column (j, us)
us(i, j) -= sum;
}
}
return *this;
}
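// Worked example of the max-subtraction above (column-wise case): for a column
// holding (1000, 1001), evaluating log(exp(1000) + exp(1001)) directly would
// overflow. With maxV = 1001 the code works on the shifted values (-1, 0):
// sum = exp(-1) + exp(0), log(sum) ~= 0.3133, and the final column is
// (-1.3133, -0.3133), the exact log-softmax.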
//[this]=hardmax([this])
//the max element is set to 1, all others to 0
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceHardmax(const bool isColWise)
{
return AssignHardmaxOf(*this, isColWise);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignHardmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
if (a.IsEmpty())
LogicError("AssignHardmaxOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
bool isInplace = (us.Data() == a.Data());
if (!isInplace)
memset(us.Data(), 0, a.GetNumElements() * sizeof(ElemType));
if (isColWise)
{
foreach_column (j, a)
{
// we need to extract max
ElemType maxV = a(0, j);
long maxI = 0;
foreach_row (i, a)
{
if (maxV < a(i, j))
{
maxV = a(i, j);
maxI = i;
}
}
if (isInplace)
memset(us.Data() + j * a.GetNumRows(), 0, a.GetNumRows() * sizeof(ElemType));
us(maxI, j) = 1.0f;
}
}
else
{
foreach_row (i, a)
{
// we need to extract max
ElemType maxV = a(i, 0);
long maxJ = 0;
foreach_column (j, a)
{
if (maxV < a(i, j))
{
maxV = a(i, j);
maxJ = j;
}
}
if (isInplace)
{
foreach_column(j, us)
us(i, j) = (j == maxJ) ? 1.0f : 0.0f;
}
else
us(i, maxJ) = 1.0f;
}
}
return *this;
}
//[this]=sqrt([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSqrt()
{
return AssignSqrtOf(*this);
}
//to prevent negative values caused by floating-point operations, we force inputs to be >=0
//this may, however, hide problems in the caller.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSqrtOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSqrtOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = sqrt(max((ElemType)0, a(i, j)));
us(i + 1, j) = sqrt(max((ElemType)0, a(i + 1, j)));
us(i + 2, j) = sqrt(max((ElemType)0, a(i + 2, j)));
us(i + 3, j) = sqrt(max((ElemType)0, a(i + 3, j)));
}
// remaining
for (long i = m & ~3; i < m; i++)
{
us(i, j) = sqrt(max((ElemType)0, a(i, j)));
}
}
return *this;
}
//[this]=exp([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceExp()
{
return AssignExpOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignExpOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignExpOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = exp(a(i, j));
us(i + 1, j) = exp(a(i + 1, j));
us(i + 2, j) = exp(a(i + 2, j));
us(i + 3, j) = exp(a(i + 3, j));
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) = exp(a(i, j));
}
}
return *this;
}
//[this]=abs([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAbs()
{
return AssignAbsOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAbsOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignAbsOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = abs(a(i, j));
us(i + 1, j) = abs(a(i + 1, j));
us(i + 2, j) = abs(a(i + 2, j));
us(i + 3, j) = abs(a(i + 3, j));
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) = abs(a(i, j));
}
}
return *this;
}
//[this]=log([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog()
{
return AssignLogOf(*this);
}
//[this]=log10([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog10()
{
return AssignLog10Of(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignLogOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
if (v < EPS_IN_LOG)
{
us(i, j) = LOG_OF_EPS_IN_LOG;
}
else
us(i, j) = log(v);
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLog10Of(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignLogOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
if (v <= 0)
LogicError("AssignLogOf: Log can only applied to numbers larger than 0.");
else if (v < EPS_IN_LOG)
{
us(i, j) = LOG10_OF_EPS_IN_LOG;
}
else
us(i, j) = log10(v);
}
return *this;
}
//[this]=cos([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceCosine()
{
return AssignCosineOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCosineOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignCosineOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = cos(v);
}
return *this;
}
//[this]=-sin([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceNegativeSine()
{
return AssignNegativeSineOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNegativeSineOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignCosineOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = -sin(v);
}
return *this;
}
//[this]=acos([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAcos()
{
return AssignAcosOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAcosOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignAcosOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = acos(v);
}
return *this;
}
//[this]=asin([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAsin()
{
return AssignAsinOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAsinOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignAsinOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = asin(v);
}
return *this;
}
//[this]=cosh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceCosh()
{
return AssignCoshOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCoshOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignCoshOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = cosh(v);
}
return *this;
}
//[this]=sinh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSinh()
{
return AssignSinhOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSinhOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSinhOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = sinh(v);
}
return *this;
}
//[this]=asinh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAsinh()
{
return AssignAsinhOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAsinhOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignAsinhOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = asinh(v);
}
return *this;
}
//Threshold truncating: this[i] = max( this[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateBottom(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncateBottom: Matrix is empty.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
if (us(i, j) < threshold)
us(i, j) = threshold;
if (us(i + 1, j) < threshold)
us(i + 1, j) = threshold;
if (us(i + 2, j) < threshold)
us(i + 2, j) = threshold;
if (us(i + 3, j) < threshold)
us(i + 3, j) = threshold;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
if (us(i, j) < threshold)
us(i, j) = threshold;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncate(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncate: Matrix is empty.");
auto& us = *this;
ElemType locThresholdPos = abs(threshold);
ElemType locThresholdNeg = -locThresholdPos;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
if (us(i, j) > locThresholdPos)
us(i, j) = locThresholdPos;
else if (us(i, j) < locThresholdNeg)
us(i, j) = locThresholdNeg;
if (us(i + 1, j) > locThresholdPos)
us(i + 1, j) = locThresholdPos;
else if (us(i + 1, j) < locThresholdNeg)
us(i + 1, j) = locThresholdNeg;
if (us(i + 2, j) > locThresholdPos)
us(i + 2, j) = locThresholdPos;
else if (us(i + 2, j) < locThresholdNeg)
us(i + 2, j) = locThresholdNeg;
if (us(i + 3, j) > locThresholdPos)
us(i + 3, j) = locThresholdPos;
else if (us(i + 3, j) < locThresholdNeg)
us(i + 3, j) = locThresholdNeg;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
if (us(i, j) > locThresholdPos)
us(i, j) = locThresholdPos;
else if (us(i, j) < locThresholdNeg)
us(i, j) = locThresholdNeg;
}
}
return *this;
}
//x = x-threshold if x > threshold, x+threshold if x < -threshold, 0 otherwise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSoftThreshold(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncate: Matrix is empty.");
long m = (long) GetNumElements();
ElemType* bufPtr = Data();
#pragma omp parallel for
for (long i = 0; i < (m & ~3); i += 4) // four-way unrolling
{
if (bufPtr[i] > threshold)
bufPtr[i] -= threshold;
else if (bufPtr[i] < -threshold)
bufPtr[i] += threshold;
else
bufPtr[i] = 0;
if (bufPtr[i + 1] > threshold)
bufPtr[i + 1] -= threshold;
else if (bufPtr[i + 1] < -threshold)
bufPtr[i + 1] += threshold;
else
bufPtr[i + 1] = 0;
if (bufPtr[i + 2] > threshold)
bufPtr[i + 2] -= threshold;
else if (bufPtr[i + 2] < -threshold)
bufPtr[i + 2] += threshold;
else
bufPtr[i + 2] = 0;
if (bufPtr[i + 3] > threshold)
bufPtr[i + 3] -= threshold;
else if (bufPtr[i + 3] < -threshold)
bufPtr[i + 3] += threshold;
else
bufPtr[i + 3] = 0;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
if (bufPtr[i] > threshold)
bufPtr[i] -= threshold;
else if (bufPtr[i] < -threshold)
bufPtr[i] += threshold;
else
bufPtr[i] = 0;
}
return *this;
}
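// Worked example of the soft threshold above: with threshold = 0.5, the values
// (1.2, 0.3, -0.9) map to (0.7, 0, -0.4). This shrink-toward-zero mapping is
// the proximal operator of the L1 norm, as used in lasso-style sparse updates.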
//Threshold truncating: this[i] = max( a[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateBottomOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
if (a.IsEmpty())
LogicError("AssignTruncateBottomOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
if (a(i, j) < threshold)
us(i, j) = threshold;
else
us(i, j) = a(i, j);
}
return *this;
}
//Threshold truncating: this[i] = min( this[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateTop(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncateTop: Matrix is empty.");
auto& us = *this;
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (us(i, j) > threshold)
us(i, j) = threshold;
}
return *this;
}
//Threshold truncating: this[i] = min( a[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateTopOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
if (a.IsEmpty())
LogicError("AssignTruncateTopOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
if (a(i, j) > threshold)
us(i, j) = threshold;
else
us(i, j) = a(i, j);
}
return *this;
}
//Threshold truncating: this[i] = 0 if abs(this[i]) < threshold.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetToZeroIfAbsLessThan(const ElemType threshold)
{
if (IsEmpty())
LogicError("SetToZeroIfAbsLessThan: Matrix is empty.");
auto& us = *this;
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (abs(us(i, j)) < threshold)
us(i, j) = 0;
}
return *this;
}
//sum of all abs(elements)
template <class ElemType>
ElemType CPUMatrix<ElemType>::SumOfAbsElements() const
{
if (IsEmpty())
LogicError("SumOfAbsElements: Matrix is empty.");
if (std::is_same<ElemType, double>::value)
{
return (ElemType) cblas_dasum((int) GetNumElements(), reinterpret_cast<double*>(Data()), 1);
}
else if (std::is_same<ElemType, float>::value)
{
#pragma warning(suppress : 4244)
return cblas_sasum((int) GetNumElements(), reinterpret_cast<float*>(Data()), 1);
}
else
{
RuntimeError("Unsupported data format");
}
}
//sum of all elements
template <class ElemType>
ElemType CPUMatrix<ElemType>::SumOfElements() const
{
if (IsEmpty())
LogicError("SumOfElements: Matrix is empty.");
ElemType sum = 0;
long m = (long) GetNumElements(); // note: OpenMP requires loop indices to be long, not size_t
ElemType* bufPtr = Data();
//four-way unrolling
#pragma omp parallel for reduction(+ : sum)
for (long i = 0; i < (m & ~3); i += 4)
{
sum += bufPtr[i] + bufPtr[i + 1] + bufPtr[i + 2] + bufPtr[i + 3];
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
sum += bufPtr[i];
}
return sum;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOfElements(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSumOfElements: Matrix a is empty.");
auto& us = *this;
us.RequireSize(1, 1);
us(0, 0) = a.SumOfElements();
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignOneHot(const CPUMatrix<ElemType>& a, vector<size_t>& shape, size_t axis)
{
if (a.IsEmpty())
LogicError("AssignOneHot: Matrix a is empty.");
if (axis >= shape.size())
LogicError("AssignOneHot: axis is not correct");
size_t item_size = 1;
for (size_t i = 0; i < shape.size() && i < axis; i++)
item_size *= shape[i];
size_t num_class = shape[axis];
auto& us = *this;
auto nCols = a.GetNumCols();
auto nRows = num_class * a.GetNumRows();
us.RequireSize(nRows, nCols);
ElemType* bufPtr = Data();
ElemType* aBufPtr = a.Data();
memset(bufPtr, 0, sizeof(ElemType) * nRows * nCols);
#pragma omp parallel for
for (long i = 0; i < a.GetNumElements(); i++)
{
if (aBufPtr[i] >= 0 && aBufPtr[i] < num_class)
{
size_t block_id = i / item_size;
size_t item_id = i % item_size;
bufPtr[block_id * num_class * item_size + item_id + item_size * (size_t)aBufPtr[i]] = 1;
}
}
return *this;
}
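// Illustrative sketch of the one-hot layout above (hypothetical values): with
// shape = {num_class}, axis = 0, and num_class = 3, item_size is 1 and each
// input entry expands into a block of num_class rows. A 1x2 input holding
// (2, 0) therefore yields a 3x2 result with columns (0, 0, 1) and (1, 0, 0);
// labels outside [0, num_class) leave their block all zero.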
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::GatherFromTarget(const CPUMatrix<ElemType>& indices, const CPUMatrix<ElemType>& target, size_t row_elements)
{
if (indices.IsEmpty() || target.IsEmpty())
LogicError("GatherFromTarget: input matrix is empty.");
if (row_elements == 0)
LogicError("GatherFromTarget: target matrix at least need 1 dim.");
auto nCols = indices.GetNumCols();
auto nRows = indices.GetNumRows() * row_elements;
this->RequireSize(nRows, nCols);
ElemType* indicesBufPtr = indices.Data();
ElemType* targetBufPtr = target.Data();
ElemType* buffer = Data();
#pragma omp parallel for
for (int i = 0; i < indices.GetNumElements(); i++)
{
memcpy(buffer + i * row_elements, targetBufPtr + ((size_t)indicesBufPtr[i] * row_elements), sizeof(ElemType) * row_elements);
}
return *this;
}
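// Illustrative sketch of the gather above (hypothetical values): with
// row_elements equal to target's number of rows, each index selects one whole
// column of target; e.g. a 2x1 indices matrix holding (2, 0) stacks target's
// column 2 on top of target's column 0 in the single output column.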
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ScatterToIndices(const CPUMatrix<ElemType>& values, const CPUMatrix<ElemType>& indices, size_t row_elements)
{
if (indices.IsEmpty() || values.IsEmpty())
LogicError("ScatterToIndices: input matrix is empty.");
ElemType* indicesBufPtr = indices.Data();
ElemType* valueBufPtr = values.Data();
ElemType* buffer = Data();
ScatterValues(indicesBufPtr, valueBufPtr, buffer, (ElemType)1, indices.GetNumElements(), row_elements, this->GetNumCols());
return *this;
}
template <class ElemType>
bool CPUMatrix<ElemType>::IsEqualTo(const CPUMatrix<ElemType>& a, const ElemType threshold /*= 1e-8*/) const
{
return AreEqual(*this, a, threshold);
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorSum(const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c, const bool isColWise)
{
if (a.IsEmpty())
LogicError("VectorSum: Input matrix a is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
c.RequireSize(1, n);
#pragma omp parallel for
foreach_column (j, a)
{
ElemType v = 0;
foreach_row (i, a)
{
#pragma omp atomic
v += a(i, j);
}
c(0, j) = v;
}
}
else
{
c.RequireSize(m, 1);
#pragma omp parallel for
foreach_row (i, a)
{
ElemType v = 0;
foreach_column (j, a)
{
#pragma omp atomic
v += a(i, j);
}
c(i, 0) = v;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorNorm1(CPUMatrix<ElemType>& c, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorNorm1: Matrix is empty.");
auto& us = *this;
const int m = (int) us.GetNumRows();
const int n = (int) us.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
c.RequireSize(1, n);
#pragma omp parallel for
foreach_column (j, us)
{
ElemType v = 0;
foreach_row (i, us)
{
#pragma omp atomic
v += abs(us(i, j));
}
c(0, j) = v;
}
}
else
{
c.RequireSize(m, 1);
#pragma omp parallel for
foreach_row (i, us)
{
ElemType v = 0;
foreach_column (j, us)
{
#pragma omp atomic
v += abs(us(i, j));
}
c(i, 0) = v;
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm1Of(CPUMatrix<ElemType>& a, const bool isColWise)
{
a.VectorNorm1(*this, isColWise);
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorNorm2(CPUMatrix<ElemType>& c, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorNorm2: Matrix is empty.");
auto& us = *this;
const int m = (int) us.GetNumRows();
const int n = (int) us.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
ElemType* bufPtr = us.Data();
if (isColWise) // col-wise
{
c.RequireSize(1, n);
if (std::is_same<ElemType, double>::value)
{
#pragma omp parallel for
foreach_column (j, c)
{
c(0, j) = (ElemType) cblas_dnrm2(m, reinterpret_cast<double*>(bufPtr + us.LocateColumn(j)), 1);
}
}
else if(std::is_same<ElemType, float>::value)
{
#pragma omp parallel for
foreach_column (j, c)
{
#pragma warning(suppress : 4244)
c(0, j) = cblas_snrm2(m, reinterpret_cast<float*>(bufPtr + us.LocateColumn(j)), 1);
}
}
else
{
RuntimeError("Unsupported data format");
}
}
else
{
c.RequireSize(m, 1);
if (std::is_same<ElemType, double>::value)
{
#pragma omp parallel for
foreach_row (i, c)
{
c(i, 0) = cblas_dnrm2(n, reinterpret_cast<double*>(bufPtr + i), m);
}
}
else if (std::is_same<ElemType, float>::value)
{
#pragma omp parallel for
foreach_row (i, c)
{
#pragma warning(suppress : 4244)
c(i, 0) = cblas_snrm2(n, reinterpret_cast<float*>(bufPtr + i), m);
}
}
else
{
RuntimeError("Unsupported data format");
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm2Of(CPUMatrix<ElemType>& a, const bool isColWise)
{
a.VectorNorm2(*this, isColWise);
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorNormInf(CPUMatrix<ElemType>& c, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorNormInf: Matrix is empty.");
auto& us = *this;
const int m = (int) us.GetNumRows();
const int n = (int) us.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
c.RequireSize(1, n);
// #pragma omp parallel for
foreach_column (j, us)
{
ElemType v = 0;
foreach_row (i, us)
{
v = std::max(v, fabs_(us(i, j)));
}
c(0, j) = v;
}
}
else
{
c.RequireSize(m, 1);
// #pragma omp parallel for
foreach_row (i, us)
{
ElemType v = 0;
foreach_column (j, us)
{
v = std::max(v, fabs_(us(i, j)));
}
c(i, 0) = v;
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNormInfOf(CPUMatrix<ElemType>& a, const bool isColWise)
{
a.VectorNormInf(*this, isColWise);
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignInnerProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool isColWise)
{
InnerProduct(a, b, *this, isColWise);
return *this;
}
//column-wise Khatri-Rao product (column-wise Kronecker product of a and b)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignKhatriRaoProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignKhatriRaoProductOf: Matrix is empty.");
long cols = (long) a.GetNumCols();
if (cols != b.GetNumCols())
InvalidArgument("a.GetNumCols() != b.GetNumCols()");
long rowsA = (long) a.GetNumRows();
long rowsB = (long) b.GetNumRows();
RequireSize(rowsA * rowsB, cols);
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
for (long k = 0; k < cols; k++)
{
long jj = 0;
for (long j = 0; j < rowsB; j++)
{
for (long i = 0; i < rowsA; i++)
{
(*this)(jj++, k) = a(i, k) * b(j, k);
}
}
}
return *this;
}
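// Worked example of the column-wise Khatri-Rao product above: for column k
// with a(:, k) = (a0, a1) and b(:, k) = (b0, b1, b2), the output column of
// length rowsA * rowsB is (a0*b0, a1*b0, a0*b1, a1*b1, a0*b2, a1*b2), i.e.
// the Kronecker product of the two columns.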
//column-wise reshaped product. Used to compute the KhatriRaoProduct gradient
// this = reshape each column of a from (K1xK2,1) to (K1, K2)
// if each column of a is not transposed, each (K1, K2) times each column of b (K2, frames);
// the output is a (K1, frames) matrix
// if each column of a is transposed, each (K1, K2)^T times each column of b (K1, frames) and the output is (K2, frames)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddColumnReshapeProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool transposeAColumn)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AddColumnReshapeProductOf: Matrix is empty.");
long cols = (long) a.GetNumCols();
if (cols != b.GetNumCols())
InvalidArgument("AddColumnReshapeProductOf: a.GetNumCols() != b.GetNumCols()");
long rowsA = (long) a.GetNumRows();
long rowsB = (long) b.GetNumRows();
if (rowsA % rowsB != 0)
InvalidArgument("AddColumnReshapeProductOf: number of rows in a should be multiples of that in b.");
long rowsC = rowsA / rowsB;
if (rowsC != GetNumRows() || cols != GetNumCols())
InvalidArgument("AddColumnReshapeProductOf: This matrix does not have the right size.");
auto& us = *this;
if (transposeAColumn)
{
// find nrows and ncols of the reshaped a
long nrows = rowsB;
long ncols = rowsC;
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
foreach_column (t, a)
{
size_t k = 0;
for (size_t j = 0; j < ncols; j++) // rows and cols are transposed
{
ElemType v = 0;
for (size_t i = 0; i < nrows; i++)
{
v += a(k, t) * b(i, t);
k++;
}
us(j, t) += v;
}
}
}
else
{
size_t ncols = rowsB;
size_t nrows = rowsC;
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
foreach_column (t, a)
{
size_t k = 0;
for (size_t j = 0; j < ncols; j++)
{
for (size_t i = 0; i < nrows; i++)
{
us(i, t) += a(k, t) * b(j, t);
k++;
}
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithScaleOf(ElemType alpha, const CPUMatrix<ElemType>& a)
{
ScaleAndAdd(alpha, a, *this);
return *this;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::FrobeniusNorm() const
{
if (IsEmpty())
LogicError("FrobeniusNorm: Matrix is empty.");
ElemType v = 0;
long m = (long) GetNumElements();
ElemType* bufPtr = Data();
//four-way unrolling
#pragma omp parallel for reduction(+ : v)
for (long i = 0; i < (m & ~3); i += 4)
{
v += bufPtr[i] * bufPtr[i] + bufPtr[i + 1] * bufPtr[i + 1] + bufPtr[i + 2] * bufPtr[i + 2] + bufPtr[i + 3] * bufPtr[i + 3];
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
v += bufPtr[i] * bufPtr[i];
}
return sqrt(v);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignFrobeniusNormOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignFrobeniusNormOf: Matrix a is empty.");
auto& us = *this;
us.RequireSize(1, 1);
us(0, 0) = a.FrobeniusNorm();
return us;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNormInf() const
{
if (IsEmpty())
LogicError("MatrixNormInf: Matrix is empty.");
auto& us = *this;
ElemType v = 0;
#pragma omp parallel for
foreach_coord (i, j, us)
{
#pragma omp critical
{
v = std::max(v, fabs_(us(i, j)));
}
}
return v;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNorm0() const
{
if (IsEmpty())
LogicError("MatrixNorm0: Matrix is empty.");
auto& us = *this;
ElemType v = 0;
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (us(i, j) != 0)
{
#pragma omp critical
{
++v;
}
}
}
return v;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNorm1() const
{
if (IsEmpty())
LogicError("MatrixNorm1: Matrix is empty.");
auto& us = *this;
ElemType sum = 0;
#pragma omp parallel for reduction(+ : sum)
foreach_coord (i, j, us)
{
sum += abs(us(i, j));
}
return sum;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSignOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSignOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_column (j, us)
{
foreach_row (i, us)
{
ElemType v = a(i, j);
if (!std::isnan(v))
us(i, j) = (v == (ElemType) 0 ? (ElemType) 0 : (v > 0 ? (ElemType) 1 : (ElemType)(-1)));
else
us(i, j) = v;
}
}
return us;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddSignOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AddSignOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_column (j, us)
{
foreach_row (i, us)
{
ElemType v = a(i, j);
if (!std::isnan(v))
us(i, j) += (v == (ElemType) 0 ? (ElemType) 0 : (v > 0 ? (ElemType) 1 : (ElemType)(-1)));
else
us(i, j) = v;
}
}
return us;
}
//I decided to use CPUMatrix<ElemType>& maxIndexes instead of an integer vector because the result may be used in additional calculations
template <class ElemType>
void CPUMatrix<ElemType>::VectorMax(CPUMatrix<ElemType>& maxIndexes, CPUMatrix<ElemType>& maxValues, const bool isColWise, int topK) const
{
if (IsEmpty())
LogicError("VectorMax: Matrix is empty.");
auto& us = *this;
const int m = (int) GetNumRows();
const int n = (int) GetNumCols();
if (topK > m)
InvalidArgument("VectorMax: TopK must be less or equal than the number of rows");
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
maxValues.RequireSize(topK, n);
maxIndexes.RequireSize(topK, n);
if (topK == 1)
{
#pragma omp parallel for
for (int j = 0; j < n; j++)
{
ElemType v = us(0, j);
size_t index = 0;
foreach_row (i, us)
{
if (v < us(i, j))
{
index = i;
v = us(i, j);
}
}
maxValues(0, j) = v;
maxIndexes(0, j) = (ElemType) index;
}
}
else
{
std::vector<int> indices(m);
const ElemType* curVal = Data();
ElemType* curIdx = maxIndexes.Data();
ElemType* curMax = maxValues.Data();
for (int icol = 0; icol < n; icol++, curVal += m, curIdx += topK, curMax += topK)
{
std::iota(indices.begin(), indices.end(), 0);
// Partial sort, descending order.
std::partial_sort(indices.begin(), indices.begin() + topK, indices.end(),
[curVal](const int& a, const int& b)
{
return curVal[a] > curVal[b];
});
// REVIEW alexeyk: the following produces warning (see SCL_SECURE_NO_WARNINGS) so use loop instead.
// std::transform(indices.begin(), indices.begin() + topK, curIdx, [](const int& a) { return static_cast<ElemType>(a); });
for (int i2 = 0; i2 < topK; i2++)
{
curIdx[i2] = static_cast<ElemType>(indices[i2]);
curMax[i2] = curVal[indices[i2]];
}
}
}
}
else
{
if (topK > 1)
RuntimeError("Row-wise TopK max is not supported.");
maxValues.RequireSize(m, 1);
maxIndexes.RequireSize(m, 1);
#pragma omp parallel for
for (int i = 0; i < m; i++)
{
ElemType v = us(i, 0);
size_t index = 0;
foreach_column (j, us)
{
if (v < us(i, j))
{
index = j;
v = us(i, j);
}
}
maxValues(i, 0) = v;
maxIndexes(i, 0) = (ElemType) index;
}
}
}
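// Illustrative sketch (hypothetical values): a column-wise top-2 query on a
// column holding (0.1, 0.9, 0.4) fills that column of maxValues with
// (0.9, 0.4) and of maxIndexes with (1, 2); tied values keep whatever order
// the partial sort produces.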
template <class ElemType>
void CPUMatrix<ElemType>::VectorMin(CPUMatrix<ElemType>& minIndexes, CPUMatrix<ElemType>& minValues, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorMin: Matrix is empty.");
auto& us = *this;
const int m = (int) GetNumRows();
const int n = (int) GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
minValues.RequireSize(1, n);
minIndexes.RequireSize(1, n);
#pragma omp parallel for
for (int j = 0; j < n; j++)
{
ElemType v = us(0, j);
size_t index = 0;
foreach_row (i, us)
{
if (v > us(i, j))
{
index = i;
v = us(i, j);
}
}
minValues(0, j) = v;
minIndexes(0, j) = (ElemType) index;
}
}
else
{
minValues.RequireSize(m, 1);
minIndexes.RequireSize(m, 1);
#pragma omp parallel for
for (int i = 0; i < m; i++)
{
ElemType v = us(i, 0);
size_t index = 0;
foreach_column (j, us)
{
if (v > us(i, j))
{
index = j;
v = us(i, j);
}
}
minValues(i, 0) = v;
minIndexes(i, 0) = (ElemType) index;
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNumOfDiff(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, bool searchInCol)
{
if (a.GetNumCols() != b.GetNumCols())
throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of columns.");
if (!searchInCol && a.GetNumRows() != b.GetNumRows())
throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of rows.");
ElemType n = 0;
if (!searchInCol)
{
foreach_coord (i, j, a)
{
n += (a(i, j) != b(i, j));
}
}
else
{
size_t crow = b.GetNumRows();
const ElemType* curCol = b.Data();
for (size_t icol = 0; icol < a.GetNumCols(); icol++, curCol += crow)
{
auto res = std::find(curCol, curCol + crow, a(0, icol));
if (res == curCol + crow)
n++;
}
}
RequireSize(1, 1); // result should be one element
(*this)(0, 0) = n;
return *this;
}
#pragma endregion Member BLAS Functions
#pragma region Other helper Functions
struct PrintRange
{
// print from begin to skipBegin, then from skipEnd to end
// skipBegin = end if no split
size_t begin;
size_t skipBegin;
size_t skipEnd;
size_t end;
bool IsEmpty() const { return end <= begin; }
// examples:
// * 3..10
// * -3..-3: include end-3..end and 0..3
PrintRange(ptrdiff_t first, ptrdiff_t last, size_t total)
{
if (first >= 0 && last >= 0)
{
begin = (size_t)first;
end = (size_t)last + 1;
if (end > total) // allow INT_MAX, meaning to end
end = total;
skipBegin = end;
skipEnd = end;
}
else if (first < 0 && last < 0)
{
begin = 0;
skipBegin = (size_t)(-last);
skipEnd = (size_t)(total + first);
if (skipEnd <= skipBegin)
skipBegin = skipEnd = total;
end = total;
}
else // if other combinations are ever of interest then implement them here
LogicError("Print: Bounds must be either both positive or both negative.");
}
};
// use negative ranges to print corners, e.g. Print("name", -3, -3, -3, -3) will print the first 3 and last 3 rows/cols
template <class ElemType>
void CPUMatrix<ElemType>::Print(const char* matrixName, ptrdiff_t rowFirst, ptrdiff_t rowLast, ptrdiff_t colFirst, ptrdiff_t colLast) const
{
fprintf(stderr, "\n###### ");
if (matrixName != nullptr)
fprintf(stderr, "%s ", matrixName);
fprintf(stderr, "(%lu, %lu)", (unsigned long)GetNumRows(), (unsigned long)GetNumCols());
if (rowFirst != 0 || colFirst != 0 || (size_t)(rowLast + 1) != GetNumRows() || (size_t)(colLast + 1) != GetNumCols())
fprintf(stderr, " [%ld:%ld, %ld:%ld]", (long)rowFirst, (long)rowLast, (long)colFirst, (long)colLast);
fprintf(stderr, " ######\n\n");
if (IsEmpty())
{
fprintf(stderr, "(empty)\n");
return;
}
PrintRange rowRange(rowFirst, rowLast, GetNumRows());
PrintRange colRange(colFirst, colLast, GetNumCols());
if (rowRange.IsEmpty() || colRange.IsEmpty())
{
fprintf(stderr, "(empty)\n");
return;
}
const auto& us = *this;
if (rowRange.begin > 0)
fprintf(stderr, "...\n");
for (size_t i = rowRange.begin; i < rowRange.end; i++)
{
if (i == rowRange.skipBegin) // insert ... between the two blocks if any
{
fprintf(stderr, "...\n");
i = rowRange.skipEnd;
}
if (colRange.begin > 0) // ... at line start
fprintf(stderr, "...\t");
for (size_t j = colRange.begin; j < colRange.end; j++)
{
if (j == colRange.skipBegin)
{
fprintf(stderr, "...\t");
j = colRange.skipEnd;
}
fprintf(stderr, "%.10f\t", (double)us(i, j));
}
if (colRange.end < GetNumCols()) // ... at line end
fprintf(stderr, "...");
fprintf(stderr, "\n");
}
if (rowRange.end < GetNumRows())
fprintf(stderr, "...\n");
}
template <class ElemType>
void CPUMatrix<ElemType>::Print(const char* matrixName /*=nullptr*/) const
{
Print(matrixName, 0, GetNumRows() - 1, 0, GetNumCols() - 1);
}
// file I/O
//matrixName is used to verify that the correct matrix is read.
template <class ElemType>
void CPUMatrix<ElemType>::ReadFromFile(FILE*, const char* /*matrixName*/)
{
RuntimeError("not implemented.");
}
//matrixName is used to verify that the correct matrix is written.
template <class ElemType>
void CPUMatrix<ElemType>::WriteToFile(FILE*, const char* /*matrixName*/)
{
RuntimeError("not implemented.");
}
//assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPackedConvolutionInput(const CPUMatrix<ElemType>& inputSubBatch,
const size_t inputWidth, const size_t inputHeight, const size_t inputChannels,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/,
const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample,
const bool zeroPadding)
{
if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth)
LogicError("Arguments verticalSubsample (or horitzontalSubsample) must be less or equal than kernelHeight (or kernelWidth).");
const size_t packedInputRows = kernelWidth * kernelHeight * inputChannels;
const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel
const size_t inputDim = inputWidth * inputHeight * inputChannels;
const size_t smallBatchSize = inputSubBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * inputChannels);
RequireSize(packedInputRows, packedInputColsPerSample * smallBatchSize);
if (zeroPadding)
SetValue((ElemType) 0);
const long halfKernelWidth = (long) kernelWidth / 2;
const long halfKernelHeight = (long) kernelHeight / 2;
#pragma omp parallel for // each input element is copied to many places
for (long sample = 0; sample < smallBatchSize; sample++)
{
for (long id = 0; id < inputDim; id++)
{
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels)
// IN_ELEM_COLPOS = sample
const long y = id / inputHeightTimesChannel; // inputCol
const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels
const long x = nXC / (long) inputChannels; // inputRow
const long c = nXC % (long) inputChannels; // channel
long x0 = 0, y0 = 0, x1 = 0, y1 = 0;
if (zeroPadding)
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1.0f + halfKernelHeight) / (ElemType)verticalSubsample)); // row: first wrow whose window contains x
x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1.0f + halfKernelWidth) / (ElemType)horizontalSubsample)); // col: first wcol whose window contains y
y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample); // first posyInKernel
}
else
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1) / (ElemType)verticalSubsample)); // row: first wrow whose window contains x
x1 = (long) (x - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1) / (ElemType)horizontalSubsample)); // col: first wcol whose window contains y
y1 = (long) (y - y0 * horizontalSubsample); // first posyInKernel
}
assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth);
// PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight)
// PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow)
ElemType currentInputValue = inputSubBatch(id, sample);
long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight);
for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample)
{
long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight);
for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample)
{
const long packRow = packRowBase + posxInKernel;
const long packCol = packColBase + wrow;
(*this)(packRow, packCol) = currentInputValue;
}
packColBase += (long) outputHeight;
}
}
}
return *this;
}
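// Note: this packing is effectively the im2col transformation; once the input
// patches are laid out as columns, the convolution reduces to a single matrix
// product with the kernel matrix, which is why each input element is copied
// into every kernel window that covers it.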
//assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::UnpackConvolutionInput(CPUMatrix<ElemType>& inputSubBatch,
const size_t inputWidth, const size_t inputHeight, const size_t inputChannels,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/,
const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample,
const bool zeroPadding) const
{
if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth)
LogicError("Arguments verticalSubsample (or horizonSubsample) must be less than or equal to kernelHeight (or kernelWidth).");
const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel
const size_t inputDim = inputWidth * inputHeight * inputChannels;
const size_t smallBatchSize = inputSubBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * inputChannels);
const long halfKernelWidth = (long) kernelWidth / 2;
const long halfKernelHeight = (long) kernelHeight / 2;
#pragma omp parallel for // each input element is copied to many places
for (long sample = 0; sample < smallBatchSize; sample++)
{
for (long id = 0; id < inputDim; id++)
{
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels)
// IN_ELEM_COLPOS = sample
const long y = id / inputHeightTimesChannel; // inputCol
const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels
const long x = nXC / (long) inputChannels; // inputRow
const long c = nXC % (long) inputChannels; // channel
long x0 = 0, y0 = 0, x1 = 0, y1 = 0;
if (zeroPadding)
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1.0f + halfKernelHeight) / (ElemType) verticalSubsample)); // row: first wrow whose window contains x
x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1.0f + halfKernelWidth) / (ElemType) horizontalSubsample)); // col: first wcol whose window contains y
y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample); // first posyInKernel
}
else
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1) / (ElemType) verticalSubsample)); // row: first wrow whose window contains x
x1 = (long) (x - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1) / (ElemType) horizontalSubsample)); // col: first wcol whose window contains y
y1 = (long) (y - y0 * horizontalSubsample); // first posyInKernel
}
assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth);
// PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight)
// PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow)
ElemType currentInputValue = inputSubBatch(id, sample);
long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight);
for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample)
{
long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight);
for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample)
{
const long packRow = packRowBase + posxInKernel;
const long packCol = packColBase + wrow;
currentInputValue += (*this)(packRow, packCol);
}
packColBase += (long) outputHeight;
}
inputSubBatch(id, sample) = currentInputValue;
}
}
return inputSubBatch;
}
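// Worked example (illustrative comment only, not part of the computation): with inputHeight = 2
// and inputChannels = 3 (RGB), the linear row index of an element is
//   IN_ELEM_ROWPOS(channel, row, col) = channel + (row + col * inputHeight) * inputChannels,
// so the green value at (row = 1, col = 0) sits at index 1 + (1 + 0 * 2) * 3 = 4 within its
// column; the channel index varies fastest, then the row, then the column.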
//assume each column is an input sample. Each sample is stored in (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignMaxPoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/,
const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
const size_t batchSize = inputBatch.GetNumCols();
RequireSize(outputSizePerSample, batchSize);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < (long) batchSize; sample++)
{
for (long outputIndexWithinSample = 0; outputIndexWithinSample < outputSizePerSample; outputIndexWithinSample++)
{
const long y = outputIndexWithinSample / outputHeightTimesChannel; // wcol
const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels
const long x = (long) (nXC / channels); // wrow
const long c = (long) (nXC % channels); // channel
ElemType maxVal = -FLT_MAX; // running maximum over the pooling window
const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c);
for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++)
{
long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel;
for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++)
{
const ElemType val = inputBatch(rowInInput, sample); // pf[rowInWindow*channels];
maxVal = std::max(maxVal, val);
rowInInput += (long) channels;
}
}
(*this)(outputIndexWithinSample, sample) = maxVal;
}
}
return *this;
}
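// Minimal usage sketch (hypothetical shapes, for illustration only): pooling a batch of 4x4
// single-channel images with a 2x2 window and stride 2 produces 2x2 outputs per sample:
//   CPUMatrix<float> in(16, batchSize), out; // 16 = 4 * 4 * 1 rows per sample
//   out.AssignMaxPoolingResult(in, /*channels*/ 1,
//                              /*inputWidth*/ 4, /*inputHeight*/ 4, /*inputSizePerSample*/ 16,
//                              /*outputWidth*/ 2, /*outputHeight*/ 2, /*outputSizePerSample*/ 4,
//                              /*windowWidth*/ 2, /*windowHeight*/ 2,
//                              /*horizontalSubsample*/ 2, /*verticalSubsample*/ 2);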
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddMaxPoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch, const CPUMatrix<ElemType>& inputBatch, const CPUMatrix<ElemType>& outputBatch,
const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
size_t batchSize = inputBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < batchSize; sample++)
{
for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++)
{
const long y = inputIndexWithinSample / inputHeightTimesChannel; // col in input
const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*channels
const long x = (long) (nXC / channels); // row in input
const long c = (long) (nXC % channels); // channel
long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample)); // inclusive start
long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / verticalSubsample : outputHeight - 1); // inclusive end
long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample)); // inclusive start
long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1); // inclusive end
ElemType inputValue = inputBatch(inputIndexWithinSample, sample);
for (long outY = startOutY; outY <= endOutY; outY++)
{
for (long outX = startOutX; outX <= endOutX; outX++)
{
long outputIndex = (long) (outY * outputHeightTimesChannel + outX * channels + c);
if (inputValue == outputBatch(outputIndex, sample))
(*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample);
}
}
}
}
return *this;
}
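// Worked example of the window-range math above (illustrative only): for input row x = 5 with
// windowHeight = 3 and verticalSubsample = 2, startOutX = ceil((5 - 3 + 1) / 2) = 2 and
// endOutX = min(5 / 2, outputHeight - 1) = 2, so only output row 2 (covering input rows 4..6)
// can route gradient back to input row 5.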
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAveragePoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/,
const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
const size_t batchSize = inputBatch.GetNumCols();
const size_t windowSize = windowWidth * windowHeight;
RequireSize(outputSizePerSample, batchSize);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < batchSize; sample++)
{
for (long outputIndexWithinSample = 0; outputIndexWithinSample < outputSizePerSample; outputIndexWithinSample++)
{
const long y = outputIndexWithinSample / outputHeightTimesChannel; // wcol
const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels
const long x = (long) (nXC / channels); // wrow
const long c = (long) (nXC % channels); // channel
ElemType sum = 0;
const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c);
for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++)
{
long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel;
for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++)
{
sum += inputBatch(rowInInput, sample);
rowInInput += (long) channels;
}
}
(*this)(outputIndexWithinSample, sample) = sum / windowSize;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddAveragePoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch,
const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
size_t batchSize = outputGradientBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
const long windowSize = (long) (windowWidth * windowHeight);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < batchSize; sample++)
{
for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++)
{
const long y = inputIndexWithinSample / inputHeightTimesChannel; // col in input
const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*channels
const long x = nXC / (long) channels; // row in input
const long c = nXC % (long) channels; // channel
long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample)); // inclusive start
long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / (long) verticalSubsample : outputHeight - 1); // inclusive end
long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample)); // inclusive start
long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1); // inclusive end
for (long outY = startOutY; outY <= endOutY; outY++)
{
for (long outX = startOutX; outX <= endOutX; outX++)
{
long outputIndex = outY * outputHeightTimesChannel + outX * (long) channels + c;
(*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample) / windowSize;
}
}
}
}
return *this;
}
#pragma endregion Other Helper Functions
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionForward(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
{
for (size_t row = 0; row < output.GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
int ivBase = mpRowIwht(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
ElemType sum = 0;
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
sum += kernel.Data()[ivBase + skip + i] * (*this)(colBase + dcol, sample);
}
output(row, sample) = sum;
}
}
}
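// Layout of the geometry descriptors shared by the convolution/pooling routines above and
// below, as read from the code: mpRowCol(row) is the base input index for output element `row`,
// mpRowIwht(row) the base index into the kernel weights, and mpRowRun(row) an offset i0 into
// `runs`, which stores per run block:
//   runs(i0)                               = skip (offset into the kernel weights),
//   runs(i0 + 1)                           = size (number of kernel taps),
//   runs(i0 + 2 .. i0 + 1 + size)          = input-offset deltas (dcol),
//   runs(i0 + 2 + size .. i0 + 1 + 2*size) = 0/1 mask (0 = tap disabled).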
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionBackwardData(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& grad) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
int ivBase = mpRowIwht(row, 0);
assert(0 <= colBase && colBase < grad.GetNumRows());
ElemType curGrad = (*this)(row, sample);
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
grad(colBase + dcol, sample) += curGrad * kernel.Data()[ivBase + skip + i];
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionBackwardKernel(const CPUMatrix<ElemType>& in, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& kernelGrad) const
{
// Do NOT parallelize these loops: different (row, sample) pairs accumulate into the same
// kernelGrad entries, so running them in parallel would race on kernelGrad.
for (size_t sample = 0; sample < GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
int ivBase = mpRowIwht(row, 0);
assert(0 <= colBase && colBase < in.GetNumRows());
ElemType curGrad = (*this)(row, sample);
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < in.GetNumRows());
kernelGrad.Data()[ivBase + skip + i] += curGrad * in(colBase + dcol, sample);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionInput(size_t unrollCols, size_t mapOutSize, const CPUMatrix<int>& mpRowCol,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
size_t batchSize = GetNumCols();
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)batchSize; sample++)
{
for (size_t row = 0; row < mapOutSize; row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
output.Data()[(row * batchSize + sample) * unrollCols + skip + i] = (*this)(colBase + dcol, sample);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionOutput(size_t unrollCols, size_t mapInCount, size_t mapOutCount, const CPUMatrix<int>& mpRowCol,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
if (mpRowCol.GetNumRows() % mapOutCount != 0)
InvalidArgument("The number of rows in mpRowCol must be multiple of mapOutCount.");
size_t mapOutSize = mpRowCol.GetNumRows() / mapOutCount;
size_t batchSize = GetNumCols();
size_t kernelSize = runs(1, 0);
if (kernelSize % mapInCount != 0)
InvalidArgument("kernelSize must be multiple of mapInCount.");
size_t kernelMapSize = kernelSize / mapInCount;
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < mapOutSize; row++)
{
int colBase = mpRowCol(row, 0);
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < std::min(size, (int)kernelMapSize); i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
size_t isrc = row;
size_t idst = ((colBase + dcol) * batchSize + sample) * unrollCols + ((skip + i) % kernelMapSize) * mapOutCount;
for (size_t outMap = 0; outMap < mapOutCount; outMap++, isrc += mapOutSize)
{
assert(isrc < GetNumElements());
assert(idst + outMap < output.GetNumElements());
output.Data()[idst + outMap] = (*this)(isrc, sample);
}
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionInputForKernelBackprop(size_t mapOutSize, const CPUMatrix<int>& mpRowCol,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
size_t batchSize = GetNumCols();
size_t unrollCols = mapOutSize * batchSize;
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)batchSize; sample++)
{
for (size_t row = 0; row < mapOutSize; row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
size_t idst = (skip + i) * unrollCols + row * batchSize + sample;
assert(idst < output.GetNumElements());
output.Data()[idst] = (*this)(colBase + dcol, sample);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::MaxPoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
{
for (size_t row = 0; row < output.GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
assert(std::numeric_limits<ElemType>::has_infinity);
ElemType res = -std::numeric_limits<ElemType>::infinity();
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
assert(size > 0);
for (int i = 0; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
res = std::max(res, (*this)(colBase + dcol, sample));
}
output(row, sample) = res;
}
}
}
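// Sketch of the `indices` encoding consumed above, as read from the code: for output element
// `row`, i0 = mpRowIndices(row, 0) points into `indices`, where indices(i0, 0) is the window
// size and indices(i0 + 1, 0) .. indices(i0 + size, 0) are input offsets relative to
// mpRowCol(row, 0). For example, a 2x2 window in a height-4 single-channel image could be
// encoded as size = 4 with offsets {0, 1, 4, 5} (hypothetical numbers, for illustration).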
template <class ElemType>
void CPUMatrix<ElemType>::MaxPoolingBackward(const CPUMatrix<ElemType>& out, const CPUMatrix<ElemType>& in,
const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices,
CPUMatrix<ElemType>& grad, bool accumulateGradient) const
{
if (!accumulateGradient)
grad.SetValue((ElemType)0);
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < grad.GetNumRows());
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
assert(size > 0);
ElemType g = (*this)(row, sample);
ElemType m = out(row, sample);
for (int i = 0; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
if (in(colBase + dcol, sample) >= m)
{
#pragma omp atomic
grad(colBase + dcol, sample) += g;
break;
}
}
}
}
}
// For each image, for each ROI, this function treats that ROI as an image
// and does max pooling so that it has output size pooledHeight x pooledWidth.
// It loops over each location in the output tensor, computes which ROI
// and image should populate that location, computes the subset of the image
// corresponding to the ROI and which pixels in that subset should go into the
// output location, then takes the max value over that window.
// src: Images [W x H x C x N]
// roiData: ROIs [4 x numROIs x N],
// dst: Pooled ROIs [PW x PH x C x numROIs x N]
// argmax: max positions [PW x PH x C x numROIs x N]
// spatialScale: ratio of the input feature map size to the original image size.
// where PW = Pooled Width, PH = Pooled Height, C = Channels, N = Batch Size
template <class ElemType>
void CPUMatrix<ElemType>::MaxROIPoolingForward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& output,
CPUMatrix<ElemType>& argmax, double spatialScale) const
{
size_t roiOutputSize = pooledHeight * pooledWidth * channels;
#pragma omp parallel for
for (int imgIdx = 0; imgIdx < numImg; imgIdx++)
{
auto img = ColumnSlice(imgIdx, 1);
auto rois = roiData.ColumnSlice(imgIdx, 1);
#pragma omp parallel for
for (int roiIdx = 0; roiIdx < numRois; roiIdx++)
{
// each ROI is 4 elements: (x1, y1, x2, y2), the corners in original-image coordinates.
int base = roiIdx * 4;
// roi points represent the absolute location of the roi
// in the original image.
ElemType scX1 = rois(base, 0);
ElemType scY1 = rois(base + 1, 0);
ElemType scX2 = rois(base + 2, 0);
ElemType scY2 = rois(base + 3, 0);
// compute actual spatial location of the ROI in our featuremap.
size_t x1 = (size_t)round(scX1 * spatialScale);
size_t y1 = (size_t)round(scY1 * spatialScale);
size_t x2 = (size_t)round(scX2 * spatialScale);
size_t y2 = (size_t)round(scY2 * spatialScale);
ElemType roiW = (ElemType)max(x2 - x1 + 1, (size_t)1);
ElemType roiH = (ElemType)max(y2 - y1 + 1, (size_t)1);
const ElemType winW = roiW / (ElemType)pooledWidth;
const ElemType winH = roiH / (ElemType)pooledHeight;
// inspired by Ross Girshick fast-rcnn caffe cpu: https://github.com/rbgirshick/fast-rcnn
// loop over spatial locations in output.
#pragma omp parallel for
for (int outw = 0; outw < pooledWidth; outw++)
{
for (int outh = 0; outh < pooledHeight; outh++)
{
// compute the top left corner of the input
// spatial window corresponding to this output unit
size_t hstart = (size_t)floor(outh * winH);
size_t wstart = (size_t)floor(outw * winW);
// compute bottom right corner (not included)
size_t hend = (size_t)ceil((outh + 1) * winH);
size_t wend = (size_t)ceil((outw + 1) * winW);
// offset window based on ROI top left corner.
// these indices are into the input slice.
hstart = min(max(hstart + y1, (size_t)0), height);
wstart = min(max(wstart + x1, (size_t)0), width);
hend = min(max(hend + y1, (size_t)0), height);
wend = min(max(wend + x1, (size_t)0), width);
bool isempty = (hend <= hstart) || (wend <= wstart);
for (size_t c = 0; c < channels; c++)
{
// output layout: [PW x PH x C x R x N]; R = ROIs per image
size_t outputIdx = roiIdx * roiOutputSize + outw + outh * pooledWidth + c * pooledHeight * pooledWidth;
size_t maxidx = 0;
ElemType maxval = isempty ? (ElemType)0 : (ElemType)-FLT_MAX;
size_t baseIdx = c * height * width;
for (size_t h = hstart; h < hend; h++)
{
for (size_t w = wstart; w < wend; w++)
{
// stored argmax indices are relative to the current channel.
size_t dataIdx = w + h * width;
if (img(baseIdx + dataIdx, 0) > maxval)
{
maxval = img(baseIdx + dataIdx, 0);
maxidx = dataIdx;
}
}
}
output(outputIdx, imgIdx) = maxval;
argmax(outputIdx, imgIdx) = maxidx;
}
}
}
}
}
}
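// Worked example of the bin math above (illustrative only): an ROI scaled to x1 = 2, x2 = 7
// gives roiW = 6, so with pooledWidth = 3 each bin spans winW = 2 input columns; bin outw = 1
// covers columns floor(1 * 2) + x1 = 4 up to (exclusive) ceil(2 * 2) + x1 = 6, i.e. columns 4..5.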
// This function loops over locations in the input to the ROIPoolingNode (image locations).
// It loops over the ROIs corresponding to that image, seeing which ones could contain the current location
// in their output. For each ROI, it checks the argmax data to see if that ROI indeed chose
// this pixel location as the maximum. If so, it increments the gradient term for the input location.
template <class ElemType>
void CPUMatrix<ElemType>::MaxROIPoolingBackward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& grad,
CPUMatrix<ElemType>& argmax, double spatialScale) const
{
// loop over images in the batch.
#pragma omp parallel for
for (int imgIdx = 0; imgIdx < numImg; imgIdx++)
{
// ROIs for this image. length 4*numRois;
auto rois = roiData.ColumnSlice(imgIdx, 1).Data();
// gradient values for all ROIs from this image. length numRois*pooledHeight*pooledWidth*channels;
auto pooledGrad = ColumnSlice(imgIdx, 1).Data();
auto argmaxCol = argmax.ColumnSlice(imgIdx, 1).Data();
// loop over spatial locations in the image.
#pragma omp parallel for
for (int w = 0; w < width; w++)
{
#pragma omp parallel for
for (int h = 0; h < height; h++)
{
// loop over the ROIs seeing which ones contain this location.
for (int roiN = 0; roiN < numRois; roiN++)
{
// each ROI is 4 elements: (x1, y1, x2, y2), the corners in original-image coordinates.
int roiOffset = roiN * 4;
// ROI data points represent the absolute location of the roi
// in the original image.
size_t roiStartW = (size_t)round(rois[roiOffset + 0] * spatialScale);
size_t roiStartH = (size_t)round(rois[roiOffset + 1] * spatialScale);
size_t roiEndW = (size_t)round(rois[roiOffset + 2] * spatialScale);
size_t roiEndH = (size_t)round(rois[roiOffset + 3] * spatialScale);
size_t roiWidth = max(roiEndW - roiStartW + 1, (size_t)1);
size_t roiHeight = max(roiEndH - roiStartH + 1, (size_t)1);
// skip this ROI if it doesn't contain the current input location.
const bool inROI = (w >= roiStartW && w < roiStartW + roiWidth &&
h >= roiStartH && h < roiStartH + roiHeight);
if (!inROI)
continue;
ElemType winH = (ElemType)roiHeight / (ElemType)pooledHeight;
ElemType winW = (ElemType)roiWidth / (ElemType)pooledWidth;
// what pooled nodes in the output for this ROI could have pooled this input location?
size_t phstart = (size_t)((h - roiStartH) / winH);
size_t pwstart = (size_t)((w - roiStartW) / winW);
size_t phend = (size_t)(ceil((h - roiStartH + 1) / winH));
size_t pwend = (size_t)(ceil((w - roiStartW + 1) / winW));
phstart = min(max(phstart, (size_t)0), pooledHeight);
phend = min(max(phend, (size_t)0), pooledHeight);
pwstart = min(max(pwstart, (size_t)0), pooledWidth);
pwend = min(max(pwend, (size_t)0), pooledWidth);
for (size_t c = 0; c < channels; c++)
{
ElemType gradient = 0;
// [W x H x C x N]
size_t index = w + h*width + c*height*width;
// go right up to channel c of the current ROI.
size_t offset = (roiN * channels + c) * pooledWidth * pooledHeight;
const ElemType* offsetPoolGrad = pooledGrad + offset;
const ElemType* offsetArgmax = argmaxCol + offset;
for (size_t ph = phstart; ph < phend; ph++)
{
for (size_t pw = pwstart; pw < pwend; pw++)
{
if ((size_t)offsetArgmax[ph * pooledWidth + pw] == (w + h * width))
{
gradient += offsetPoolGrad[ph * pooledWidth + pw];
}
}
}
#pragma omp atomic
grad(index, imgIdx) += gradient;
}
}
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::MaxUnpooling(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices,
const CPUMatrix<int>& indices, const CPUMatrix<ElemType>& poolInput,
CPUMatrix<ElemType>& input) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < input.GetNumRows());
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
assert(size > 0);
ElemType curMax = poolInput(colBase + indices(i0, 0), sample);
ElemType prevMax = curMax;
int imax = 0;
for (int i = 1; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < poolInput.GetNumRows());
curMax = std::max(curMax, poolInput(colBase + dcol, sample));
if (curMax > prevMax)
{
prevMax = curMax;
imax = i;
}
}
int dcol = indices(i0 + imax, 0);
assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows());
input(colBase + dcol, sample) = (*this)(row, sample);
//int i = (int)poolIn(row, sample);
//assert(0 <= i && i < size);
//int dcol = indices(i0 + i, 0);
//assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows());
//input(colBase + dcol, sample) = (*this)(row, sample);
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::AveragePoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output, const bool poolIncludePad) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
{
for (size_t row = 0; row < output.GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
ElemType sum = 0;
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
assert(size > 0);
for (int i = 0; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
sum += (*this)(colBase + dcol, sample);
}
// Divide by the number of pooled input elements. By default `size` counts only actual
// elements (padding excluded); if poolIncludePad is true, use instead the full window
// size stored at indices(0, 0).
if (poolIncludePad)
size = indices(0, 0);
output(row, sample) = sum / size;
}
}
}
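// Example of the divisor rule above (illustrative only): a 3x3 window hanging over the border
// so that only 6 input elements exist divides the sum by 6 by default, but by the full window
// size 9 (stored at indices(0, 0)) when poolIncludePad is true.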
template <class ElemType>
void CPUMatrix<ElemType>::AveragePoolingBackward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& grad, const bool poolIncludePad, bool accumulateGradient) const
{
if (!accumulateGradient)
grad.SetValue((ElemType)0);
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < grad.GetNumRows());
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
int tmp = size;
if (poolIncludePad)
size = indices(0, 0);
assert(size > 0);
ElemType g = (*this)(row, sample) / size;
size = tmp;
for (int i = 0; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
#pragma omp atomic
grad(colBase + dcol, sample) += g;
}
}
}
}
template <class ElemType>
template <class StatType>
void CPUMatrix<ElemType>::BatchNormalizationForward(const CPUMatrix<StatType>& scale, const CPUMatrix<StatType>& bias, bool inferenceOnly, double expAvgFactor, double blendFactor,
CPUMatrix<StatType>& runMean, CPUMatrix<StatType>& runVariance, CPUMatrix<ElemType>& out, double epsilon,
CPUMatrix<StatType>& saveMean, CPUMatrix<StatType>& saveInvStdDev) const
{
if (GetNumRows() % scale.GetNumRows() != 0)
LogicError("The number of rows of this matrx must be multiple of the number of rows of the scale matrix.");
if (!inferenceOnly || expAvgFactor != 0 || blendFactor != 1)
RuntimeError("Batch normalization training on CPU is not yet implemented.");
saveMean.Resize(0, 0); // only doing inference: these two are not produced
saveInvStdDev.Resize(0, 0);
bool spatial = GetNumRows() != scale.GetNumRows();
if (spatial)
{
size_t spatialSize = GetNumRows() / scale.GetNumRows();
#pragma omp parallel for
for (long icol = 0; icol < out.GetNumCols(); icol++)
{
for (long irow = 0; irow < out.GetNumRows(); irow++)
{
size_t imap = irow / spatialSize;
ElemType stdDev = sqrt(runVariance(imap, 0) + epsilon);
out(irow, icol) = (ElemType)(scale(imap, 0) * ((*this)(irow, icol) - runMean(imap, 0)) / stdDev + bias(imap, 0));
}
}
}
else
{
#pragma omp parallel for
for (long icol = 0; icol < out.GetNumCols(); icol++)
{
for (long irow = 0; irow < out.GetNumRows(); irow++)
{
ElemType stdDev = sqrt(runVariance(irow, 0) + epsilon);
out(irow, icol) = (ElemType)(scale(irow, 0) * ((*this)(irow, icol) - runMean(irow, 0)) / stdDev + bias(irow, 0));
}
}
}
}
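// The inference transform applied above, per feature map (spatial) or per row (non-spatial):
//   out = scale * (in - runMean) / sqrt(runVariance + epsilon) + bias,
// i.e. normalization by the running statistics followed by the learned affine transform.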
template <class ElemType>
template <class StatType>
void CPUMatrix<ElemType>::BatchNormalizationBackward(const CPUMatrix<ElemType>& in, CPUMatrix<ElemType>& grad, const CPUMatrix<StatType>& scale, double blendFactor,
const CPUMatrix<StatType>& saveMean, const CPUMatrix<StatType>& saveInvStdDev,
CPUMatrix<StatType>& scaleGrad, CPUMatrix<StatType>& biasGrad) const
{
UNUSED(in); UNUSED(grad); UNUSED(scale); UNUSED(blendFactor); UNUSED(saveMean); UNUSED(saveInvStdDev); UNUSED(scaleGrad); UNUSED(biasGrad);
RuntimeError("Batch normalization training on CPU is not yet implemented.");
}
#pragma region Static BLAS Functions
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = alpha * op(a) * op(b) + beta*c</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="beta">Scalar</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::MultiplyAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
ElemType beta, CPUMatrix<ElemType>& c, shared_ptr<QuantizedMultiplier<ElemType>> pQuantizedMultiplier)
{
if (a.IsEmpty() || b.IsEmpty())
return;
int m, n, k, l;
int lda, ldb, ldc;
CBLAS_TRANSPOSE mklTransA;
CBLAS_TRANSPOSE mklTransB;
if (transposeA)
{
m = (int) a.GetNumCols();
k = (int) a.GetNumRows();
lda = k;
mklTransA = CBLAS_TRANSPOSE::CblasTrans;
}
else
{
m = (int) a.GetNumRows();
k = (int) a.GetNumCols();
lda = m;
mklTransA = CBLAS_TRANSPOSE::CblasNoTrans;
}
if (transposeB)
{
l = (int) b.GetNumCols();
n = (int) b.GetNumRows();
ldb = n;
mklTransB = CBLAS_TRANSPOSE::CblasTrans;
}
else
{
l = (int) b.GetNumRows();
n = (int) b.GetNumCols();
ldb = l;
mklTransB = CBLAS_TRANSPOSE::CblasNoTrans;
}
assert(m > 0 && k > 0 && l > 0 && n > 0); // converting from size_t to int may cause overflow
if (k != l)
InvalidArgument("CPUMatrix<ElemType>::MultiplyAndWeightedAdd : The inner dimensions of a and b must match.");
if (beta == 0)
c.RequireSize(m, n);
else
c.VerifySize(m, n); // Can't resize if beta != 0
ldc = (int) c.GetNumRows();
if (pQuantizedMultiplier == nullptr)
{
if (std::is_same<ElemType, double>::value)
{
cblas_dgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<double*>(a.Data()), lda, reinterpret_cast<double*>(b.Data()), ldb, beta, reinterpret_cast<double*>(c.Data()), ldc);
}
else if (std::is_same<ElemType, float>::value)
{
#pragma warning(suppress : 4244)
cblas_sgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<float*>(a.Data()), lda, reinterpret_cast<float*>(b.Data()), ldb, beta, reinterpret_cast<float*>(c.Data()), ldc);
}
else
{
RuntimeError("Unsupported data format");
}
}
else
{
// TODO: support transpose product
if (mklTransA == CBLAS_TRANSPOSE::CblasTrans || mklTransB == CBLAS_TRANSPOSE::CblasTrans)
LogicError("Quantized multiplier currently doesn't support transpose.");
pQuantizedMultiplier->Multiply(m, n, k, a.Data(), b.Data(), c.Data());
}
}
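// Dimension sketch for the GEMM dispatch above (illustrative only; column-major storage means a
// matrix stored r x c has leading dimension r). With transposeA == true, transposeB == false:
//   CPUMatrix<float> a(128, 64), b(128, 32), c;
//   CPUMatrix<float>::MultiplyAndWeightedAdd(1.0f, a, true, b, false, 0.0f, c);
// computes c(64 x 32) = a^T * b with m = 64, n = 32, k = 128, lda = ldb = 128, ldc = 64.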
template <class ElemType>
void CPUMatrix<ElemType>::Multiply1x1AndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b,
ElemType beta, CPUMatrix<ElemType>& c)
{
if (a.GetNumElements() != 1)
InvalidArgument("the argument a must be a scalar"); // a is a scalar
ElemType f = alpha * a.Get00Element();
if (beta == 0) // don't even read the memory if beta is 0
#pragma omp parallel for
foreach_coord (i, j, c)
c(i, j) = b(i, j) * f;
else
#pragma omp parallel for
foreach_coord (i, j, c)
c(i, j) = b(i, j) * f + c(i, j) * beta;
}
template <class ElemType>
void CPUMatrix<ElemType>::ColumnwiseScaleAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& v, ElemType beta, CPUMatrix<ElemType>& c)
{
if (v.GetNumRows() != 1 && v.GetNumCols() != 1)
InvalidArgument("the argument v must be a vector"); // v is a vector
if (beta == 0)
c.RequireSize(a.GetNumRows(), a.GetNumCols());
else
c.VerifySize(a.GetNumRows(), a.GetNumCols()); // Can't resize if beta != 0
const ElemType* vd = v.Data();
if (beta == 0) // don't even read the memory if beta is 0
#pragma omp parallel for
foreach_coord(i, j, c)
c(i, j) = alpha * a(i, j) * vd[j];
else
#pragma omp parallel for
foreach_coord(i, j, c)
c(i, j) = alpha * a(i, j) * vd[j] + c(i, j) * beta;
}
/* compute singular value decomposition as
A = U*SIGMA*VT
W is used as temp working memory
*/
template <class ElemType>
void CPUMatrix<ElemType>::SVD(const CPUMatrix<ElemType>& A, CPUMatrix<ElemType>& SIGMA, CPUMatrix<ElemType>& U, CPUMatrix<ElemType>& VT, CPUMatrix<ElemType>& W)
{
if (A.IsEmpty())
LogicError("SVD: input matrix is empty.");
int info;
int m, n, lda, ldu, ldvt;
m = (int) A.GetNumRows();
n = (int) A.GetNumCols();
UNUSED(W); // W is reserved as temp working memory; this LAPACK-based path does not use it
lda = m;
ldu = m;
ldvt = n;
U.RequireSize(m, m);
SIGMA.RequireSize(std::min(m, n), 1);
VT.RequireSize(n, n);
#if CNTK_UWP
RuntimeError("Error, LAPACKE_*gesvd is not supported for UWP.\n");
#else
if (std::is_same<ElemType, double>::value)
{
std::vector<double> superb(std::max(std::min(m, n) - 1, 1));
info = LAPACKE_dgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<double*>(A.Data()), (int) lda, reinterpret_cast<double*>(SIGMA.Data()),
reinterpret_cast<double*>(U.Data()), (int) ldu, reinterpret_cast<double*>(VT.Data()), (int) ldvt, &superb[0]);
}
else if (std::is_same<ElemType, float>::value)
{
std::vector<float> superb(std::max(std::min(m, n) - 1, 1));
info = LAPACKE_sgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<float*>(A.Data()), (int) lda, reinterpret_cast<float*>(SIGMA.Data()),
reinterpret_cast<float*>(U.Data()), (int) ldu, reinterpret_cast<float*>(VT.Data()), (int) ldvt, &superb[0]);
}
else
{
RuntimeError("Unsupported data format");
}
#endif
if (info > 0)
{
RuntimeError("The algorithm computing SVD failed to converge.\n");
}
}
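// Minimal usage sketch (hypothetical sizes, for illustration only):
//   CPUMatrix<float> A(/*m*/ 6, /*n*/ 4), SIGMA, U, VT, W;
//   CPUMatrix<float>::SVD(A, SIGMA, U, VT, W); // U: 6x6, SIGMA: 4x1, VT: 4x4
// Note that LAPACKE_*gesvd overwrites the contents of A, so pass a copy if A is still needed.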
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b) + c</summary>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::MultiplyAndAdd(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
CPUMatrix<ElemType>& c)
{
return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 1.0, c);
}
template <class ElemType>
void CPUMatrix<ElemType>::AssignSoftmaxSum(const CPUMatrix<ElemType>& softmax, CPUMatrix<ElemType>& c)
{
ElemType log_likelihood = 0.0;
size_t batch_size = GetNumCols();
#pragma omp parallel for reduction(+ : log_likelihood)
for (int instance_id = 0; instance_id < batch_size; instance_id++)
{
int sample = (int) (*this)(0, instance_id);
log_likelihood += softmax(instance_id, sample);
}
c(0, 0) = -log_likelihood;
}
template <class ElemType>
void CPUMatrix<ElemType>::AssignNCEUnnormalizedEval(const CPUMatrix<ElemType>& a,
const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& c)
//this: samples+probs
// a: hidden
// b: embedding
// c: loglikelihood
{
ElemType log_likelihood = 0.0;
size_t batch_size = GetNumCols();
#pragma omp parallel for reduction(+ : log_likelihood)
for (int instance_id = 0; instance_id < batch_size; instance_id++)
{
int sample = -(int) (*this)(0, instance_id);
ElemType score = bias(sample, 0);
for (int dim = 0; dim < b.GetNumRows(); dim++)
score += b(dim, sample) * a(dim, instance_id);
log_likelihood += score;
}
c(0, 0) = -log_likelihood;
}
//samples+prob gradient hidden embedding embedding/hidden
//a.m_CPUMatrix->AssignNCEDerivative(*tmp.m_CPUMatrix, *a.m_CPUMatrix, *b.m_CPUMatrix, inputIndex, *c.m_CPUMatrix);
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNCEDerivative(const CPUMatrix<ElemType>& tmp, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t inputIndex, CPUMatrix<ElemType>& c)
{
size_t sample_size = GetNumRows() / 2;
size_t batch_size = GetNumCols();
if (inputIndex == 1)
{
#pragma omp parallel for
for (int instance_id = 0; instance_id < batch_size; instance_id++)
for (int sample_id = 0; sample_id < sample_size; sample_id++)
{
int sample = (int) (*this)(2 * sample_id, instance_id);
for (int dim = 0; dim < b.GetNumRows(); dim++)
c(dim, instance_id) -= b(dim, sample) * tmp(sample_id, instance_id);
}
}
else if (inputIndex == 2)
{
int i_blocks = omp_get_max_threads() * 16; // omp_get_num_threads() would return 1 outside a parallel region
// Assume only one block in k direction.
// We don't need to explicitly block in the j direction.
#pragma omp parallel for
for (int ib = 0; ib < i_blocks; ib++)
for (int instance_id = 0; instance_id < batch_size; instance_id++)
for (int sample_id = 0; sample_id < sample_size; sample_id++)
{
int sample = (int) (*this)(2 * sample_id, instance_id);
if (sample % i_blocks == ib)
for (int dim = 0; dim < b.GetNumRows(); dim++)
c(dim, sample) -= a(dim, instance_id) * tmp(sample_id, instance_id);
}
}
else if (inputIndex == 3)
{
// Assume only one block in k direction.
// We don't need to explicitly block in the j direction.
for (int instance_id = 0; instance_id < batch_size; instance_id++)
for (int sample_id = 0; sample_id < sample_size; sample_id++)
{
int sample = (int) (*this)(2 * sample_id, instance_id);
c(0, sample) -= tmp(sample_id, instance_id);
}
}
else
InvalidArgument("The argument inputIndex must be 1 or 2 or 3.");
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::AssignNoiseContrastiveEstimation(const CPUMatrix<ElemType>& a,
const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& tmp, CPUMatrix<ElemType>& c)
//this: samples+probs
// a: hidden
// b: embedding
// tmp: softmax
// c: loglikelihood
{
double log_likelihood = 0.0;
size_t sample_size = GetNumRows() / 2;
size_t batch_size = GetNumCols();
size_t num_noise_samples = sample_size - 1;
double log_num_noise_samples = std::log(num_noise_samples);
#pragma omp parallel for reduction(+ : log_likelihood)
for (int instance_id = 0; instance_id < batch_size; instance_id++)
for (int sample_id = 0; sample_id < sample_size; sample_id++)
{
int sample = (int) (*this)(2 * sample_id, instance_id);
double score = bias(0, sample);
for (int dim = 0; dim < b.GetNumRows(); dim++)
score += (double)(a(dim, instance_id) * b(dim, sample));
double sample_prob = -(*this)(2 * sample_id + 1, instance_id);
if (sample_id == 0)
sample_prob = -sample_prob;
double score_noise = log_num_noise_samples + sample_prob;
double z = LogAdd(score, score_noise);
double logprob = score - z;
double logprob_noise = score_noise - z;
tmp(sample_id, instance_id) = (ElemType) -std::exp(logprob);
if (sample_id == 0)
tmp(sample_id, instance_id) += 1;
log_likelihood += sample_id == 0 ? logprob : logprob_noise;
}
c(0, 0) = (ElemType) -log_likelihood;
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b)</summary>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
CPUMatrix<ElemType>& c)
{
return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 0.0, c);
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b are not transposed): c = a * b</summary>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, false, b, false, 0.0, c);
}
/// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a + c</summary>
/// if a is a column vector, add to all columns of c
/// if a is a row vector, add to all rows of c
/// if a is a scalar, add to all rows of c
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::ScaleAndAdd(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
if (a.IsEmpty() || c.IsEmpty())
LogicError("ScaleAndAdd: one of the input matrices is empty.");
if (a.GetNumRows() != 1 && a.GetNumCols() != 1) // a is not a col or row vector
{
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int len = m * n;
const int incx = 1;
const int incy = 1;
assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow
if ((int) c.GetNumRows() != m || (int) c.GetNumCols() != n)
InvalidArgument("Dimension of matrix c does not match dimension of matrix a.");
if (std::is_same<ElemType, double>::value)
{
cblas_daxpy(len, alpha, reinterpret_cast<double*>(a.Data()), incx, reinterpret_cast<double*>(c.Data()), incy);
}
else if (std::is_same<ElemType, float>::value)
{
#pragma warning(suppress : 4244)
cblas_saxpy(len, alpha, reinterpret_cast<float*>(a.Data()), incx, reinterpret_cast<float*>(c.Data()), incy);
}
else
{
RuntimeError("Unsupported data format");
}
}
else if (a.GetNumElements() == 1) // scalar, add to all elements
{
ElemType v = alpha * a(0, 0);
long m = (long) c.GetNumRows(), n = (long) c.GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
c(i, j) += v;
c(i + 1, j) += v;
c(i + 2, j) += v;
c(i + 3, j) += v;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
c(i, j) += v;
}
}
}
else if (a.GetNumCols() == 1) // col vector, add it to all columns
{
int m = (int) c.GetNumRows();
if (m != (int) a.GetNumRows())
InvalidArgument("To add column vector, rows should match.");
ElemType* aBufPtr = a.Data();
ElemType* cBufPtr = c.Data();
if (std::is_same<ElemType, double>::value)
{
#pragma omp parallel for
foreach_column (j, c)
{
cblas_daxpy(m, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + c.LocateColumn(j)), 1);
}
}
else if (std::is_same<ElemType, float>::value)
{
#pragma omp parallel for
foreach_column (j, c)
{
#pragma warning(suppress : 4244)
cblas_saxpy(m, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + c.LocateColumn(j)), 1);
}
}
else
{
RuntimeError("Unsupported data format");
}
}
else // row vector, add it to all rows
{
int m = (int) c.GetNumRows();
int n = (int) c.GetNumCols();
if (n != (int) a.GetNumCols())
InvalidArgument("To add row vector, cols should match.");
ElemType* aBufPtr = a.Data();
ElemType* cBufPtr = c.Data();
if (std::is_same<ElemType, double>::value)
{
#pragma omp parallel for
foreach_row (i, c)
{
cblas_daxpy(n, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + i), m);
}
}
else if (std::is_same<ElemType, float>::value)
{
#pragma omp parallel for
foreach_row (i, c)
{
#pragma warning(suppress : 4244)
cblas_saxpy(n, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + i), m);
}
}
else
{
RuntimeError("Unsupported data format");
}
}
}
/// <summary>c += alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AddScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumRows() == c.GetNumRows() &&
a.GetNumCols() == b.GetNumCols() && a.GetNumCols() == c.GetNumCols()))
{
InvalidArgument("AddScaledDifference: a, b, and c must have same dimension.");
}
if (a.IsEmpty())
LogicError("AddScaledDifference: Input matrix a is empty.");
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
ElemType* cBufPtr = c.Data();
long m = (long) c.GetNumElements();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
cBufPtr[i] += alpha * (aBufPtr[i] - bBufPtr[i]);
cBufPtr[i + 1] += alpha * (aBufPtr[i + 1] - bBufPtr[i + 1]);
cBufPtr[i + 2] += alpha * (aBufPtr[i + 2] - bBufPtr[i + 2]);
cBufPtr[i + 3] += alpha * (aBufPtr[i + 3] - bBufPtr[i + 3]);
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
cBufPtr[i] += alpha * (aBufPtr[i] - bBufPtr[i]);
}
}
/// <summary> c = alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AssignScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
{
InvalidArgument("AssignScaledDifference: a, b must have same dimension.");
}
if (a.IsEmpty())
LogicError("AssignScaledDifference: Input matrix a is empty.");
if (&c != &a && &c != &b)
c.RequireSize(a.GetNumRows(), a.GetNumCols());
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
ElemType* cBufPtr = c.Data();
long m = (long) c.GetNumElements();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
cBufPtr[i] = alpha * (aBufPtr[i] - bBufPtr[i]);
cBufPtr[i + 1] = alpha * (aBufPtr[i + 1] - bBufPtr[i + 1]);
cBufPtr[i + 2] = alpha * (aBufPtr[i + 2] - bBufPtr[i + 2]);
cBufPtr[i + 3] = alpha * (aBufPtr[i + 3] - bBufPtr[i + 3]);
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
cBufPtr[i] = alpha * (aBufPtr[i] - bBufPtr[i]);
}
}
// c[ci,cj] += a[ai,aj]
template <class ElemType>
void CPUMatrix<ElemType>::AddElementToElement(ElemType beta, const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
{
if (ai >= a.GetNumRows() || aj >= a.GetNumCols() ||
ci >= c.GetNumRows() || cj >= c.GetNumCols())
InvalidArgument("AddElementToElement: index out of range.");
ElemType us = beta ? beta * c(ci, cj) : (ElemType)0; // do not multiply if beta is 0, could be a NaN
us += a(ai, aj);
c(ci, cj) = us;
}
////c[ci,cj] += a[ai,aj]
//template<class ElemType>
//void CPUMatrix<ElemType>::AddLogElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
//{
// if (ai >= a.GetNumRows() || aj >=a.GetNumCols() ||
// ci >= c.GetNumRows() || cj >=c.GetNumCols())
// InvalidArgument("AddElementToElement: index out of range.");
//
// ElemType v = a(ai,aj);
// c(ci, cj) += ((v < EPS_IN_LOG) ? LOG_OF_EPS_IN_LOG : log(v));
//}
#if 0 // now done as AddElementToElement (beta=0)
// c[ci,cj] = a[ai,aj]
template <class ElemType>
void CPUMatrix<ElemType>::AssignElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
{
if (ai >= a.GetNumRows() || aj >= a.GetNumCols() ||
ci >= c.GetNumRows() || cj >= c.GetNumCols())
InvalidArgument("AssignElementToElement: index out of range.");
c(ci, cj) = a(ai, aj);
}
#endif
/// <summary>c += alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">1X1 matrix</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AddScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
if (alpha.GetNumElements() != 1)
InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix.");
AddScaledDifference(alpha(0, 0), a, b, c);
}
/// <summary> c = alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">1X1 matrix</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AssignScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
if (alpha.GetNumElements() != 1)
InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix.");
AssignScaledDifference(alpha(0, 0), a, b, c);
}
/// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
if (a.IsEmpty())
LogicError("Scale: Input matrix a is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
c.RequireSize(m, n);
ElemType* aBufPtr = a.Data();
ElemType* cBufPtr = c.Data();
if (alpha == 0)
{
memset(cBufPtr, 0, sizeof(ElemType) * c.GetNumElements());
return;
}
long size = (long) c.GetNumElements();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (size & ~3); i += 4)
{
cBufPtr[i] = alpha * aBufPtr[i];
cBufPtr[i + 1] = alpha * aBufPtr[i + 1];
cBufPtr[i + 2] = alpha * aBufPtr[i + 2];
cBufPtr[i + 3] = alpha * aBufPtr[i + 3];
}
// remaining elements
for (long i = size & ~3; i < size; i++)
{
cBufPtr[i] = alpha * aBufPtr[i];
}
}
/// <summary>Matrix-scalar multiply with col-major matrices: a = alpha * a</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("Scale: Input matrix a is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int len = m * n;
const int incx = 1;
assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow
if (alpha == 0 && incx == 1)
{
memset(a.Data(), 0, sizeof(ElemType) * len);
}
else if (std::is_same<ElemType, double>::value)
{
cblas_dscal(len, alpha, reinterpret_cast<double*>(a.Data()), incx);
}
else if (std::is_same<ElemType, float>::value)
{
#pragma warning(suppress : 4244)
cblas_sscal(len, alpha, reinterpret_cast<float*>(a.Data()), incx);
}
else
{
RuntimeError("Unsupported data format");
}
}
/// <summary>Matrix multiply with col-major matrices: a = alpha[1,1] * a</summary>
/// <param name="alpha">1x1 matrix</param>
/// <param name="a">Input matrix</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(CPUMatrix<ElemType> alpha, CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("Scale: Input matrix a is empty.");
if (alpha.GetNumElements() != 1)
LogicError("Matrix alpha must be 1x1");
CPUMatrix<ElemType>::Scale(alpha(0, 0), a);
}
template <class ElemType>
void CPUMatrix<ElemType>::InnerProduct(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("InnerProduct: one of the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int k = (int) b.GetNumRows();
const int l = (int) b.GetNumCols();
assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
if (m != k || n != l)
InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");
if ((isColWise && m == 1) || !isColWise && n == 1) // in this case it's equivalent to element-wise product
{
c.AssignElementProductOf(a, b);
}
else if (isColWise) // col-wise
{
c.RequireSize(1, n);
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
if (std::is_same<ElemType, double>::value)
{
#pragma omp parallel for
foreach_column (j, c)
{
c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1);
}
}
else if (std::is_same<ElemType, float>::value)
{
#pragma omp parallel for
foreach_column (j, c)
{
#pragma warning(suppress : 4244)
c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1);
}
}
else
{
RuntimeError("Unsupported data format");
}
}
else
{
c.RequireSize(m, 1);
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
if (std::is_same<ElemType, double>::value)
{
#pragma omp parallel for
foreach_row (i, c)
{
c(i, 0) = cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m);
}
}
else if (std::is_same<ElemType, float>::value)
{
#pragma omp parallel for
foreach_row (i, c)
{
#pragma warning(suppress : 4244)
c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m);
}
}
else
{
RuntimeError("Unsupported data format");
}
}
}
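// Example of the two modes above (illustrative only): for a and b of shape m x n,
// isColWise == true yields c of shape 1 x n with c(0, j) = dot(a.col(j), b.col(j)), while
// isColWise == false yields c of shape m x 1 with c(i, 0) = dot(a.row(i), b.row(i)).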
// treat matrices as vectors. do vec(a)^T vec(b)
template <class ElemType>
ElemType CPUMatrix<ElemType>::InnerProductOfMatrices(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("InnerProductOfMatrices: one of the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int k = (int) b.GetNumRows();
const int l = (int) b.GetNumCols();
assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
if (m != k || n != l)
InvalidArgument("InnerProductOfMatrices: Matrices a and b should have same dimension.");
if (std::is_same<ElemType, double>::value)
{
return (ElemType) cblas_ddot((int) a.GetNumElements(), reinterpret_cast<double*>(a.Data()), 1, reinterpret_cast<double*>(b.Data()), 1);
}
else if (std::is_same<ElemType, float>::value)
{
#pragma warning(suppress : 4244)
return (ElemType) cblas_sdot((int) a.GetNumElements(), reinterpret_cast<float*>(a.Data()), 1, reinterpret_cast<float*>(b.Data()), 1);
}
else
{
RuntimeError("Unsupported data format");
}
}
template <class ElemType>
void CPUMatrix<ElemType>::ElementWisePower(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
if (a.IsEmpty())
LogicError("Scale: The input matrix a is empty.");
c.RequireSize(a.GetNumRows(), a.GetNumCols());
if (alpha == 2)
{
#pragma omp parallel for
foreach_coord (i, j, c)
{
c(i, j) = a(i, j) * a(i, j);
}
}
else if (alpha == 3)
{
#pragma omp parallel for
foreach_coord (i, j, c)
{
c(i, j) = a(i, j) * a(i, j) * a(i, j);
}
}
else
{
#pragma omp parallel for
foreach_coord (i, j, c)
{
c(i, j) = pow(a(i, j), alpha);
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::BatchMatMul(ElemType beta, const CPUMatrix<ElemType>& a, const bool transposeA, const int m, const CPUMatrix<ElemType>& b, const bool transposeB, const int n, CPUMatrix<ElemType>& c, const bool isColWise)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("BatchMatMul: one of the input matrices is empty.");
if (!isColWise)
LogicError("Only column wise is supported.");
const int aSampleElemNum = (int)a.GetNumRows();
const int aBatchSize = (int)a.GetNumCols();
const int bSampleElemNum = (int)b.GetNumRows();
const int bBatchSize = (int)b.GetNumCols();
assert(aSampleElemNum > 0 && aBatchSize > 0 && bSampleElemNum > 0 && bBatchSize > 0);
if (aBatchSize != bBatchSize)
InvalidArgument("BatchMatMul: Matrices a and b should have same batch size.");
int k = aSampleElemNum / m;
int kb = bSampleElemNum / n;
if (k != kb)
InvalidArgument("BatchMatMul: Matrices a's cols number should match Matrices b's rows number.");
size_t cSampleElemNum = m * n;
if (beta == 0)
c.RequireSize(cSampleElemNum, aBatchSize);
else
c.VerifySize(cSampleElemNum, aBatchSize); // Can't resize if beta != 0
#ifdef USE_OPENBLAS
int lda, ldb, ldc;
CBLAS_TRANSPOSE blasTransA;
CBLAS_TRANSPOSE blasTransB;
lda = transposeA ? k : m;
ldb = transposeB ? n : k;
blasTransA = transposeA ? CblasTrans : CblasNoTrans;
blasTransB = transposeB ? CblasTrans : CblasNoTrans;
ldc = m;
std::vector<const ElemType *> a_array;
std::vector<const ElemType *> b_array;
std::vector<ElemType *> c_array;
a_array.reserve(aBatchSize);
b_array.reserve(aBatchSize);
c_array.reserve(aBatchSize);
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
ElemType* cBufPtr = c.Data();
for (size_t i = 0; i < aBatchSize; i++)
{
a_array.push_back(aBufPtr + a.LocateColumn(i));
b_array.push_back(bBufPtr + b.LocateColumn(i));
c_array.push_back(cBufPtr + c.LocateColumn(i));
}
for (size_t i = 0; i < aBatchSize; i++)
{
if (sizeof(ElemType) == sizeof(double))
{
double alpha = 1.0;
cblas_dgemm((CBLAS_ORDER)(int)MatrixOrder::ColMajor, blasTransA, blasTransB, m, n, k, alpha, reinterpret_cast<const double*>(a_array[i]), lda, reinterpret_cast<const double*>(b_array[i]), ldb, double(beta), reinterpret_cast<double*>(c_array[i]), ldc);
}
else
{
float alpha = 1.0f;
cblas_sgemm((CBLAS_ORDER)(int)MatrixOrder::ColMajor, blasTransA, blasTransB, m, n, k, alpha, reinterpret_cast<const float*>(a_array[i]), lda, reinterpret_cast<const float*>(b_array[i]), ldb, float(beta), reinterpret_cast<float*>(c_array[i]), ldc);
}
}
#else
std::vector<int> m_array(aBatchSize, m);
std::vector<int> n_array(aBatchSize, n);
std::vector<int> k_array(aBatchSize, k);
std::vector<int> lda_array(aBatchSize, transposeA ? k : m);
std::vector<int> ldb_array(aBatchSize, transposeB ? n : k);
std::vector<int> ldc_array(aBatchSize, m);
std::vector<int> group_size(1, aBatchSize);
std::vector<CBLAS_TRANSPOSE> transa_array(aBatchSize, transposeA ? CblasTrans : CblasNoTrans);
std::vector<CBLAS_TRANSPOSE> transb_array(aBatchSize, transposeB ? CblasTrans : CblasNoTrans);
std::vector<const ElemType *> a_array;
std::vector<const ElemType *> b_array;
std::vector<ElemType *> c_array;
a_array.reserve(aBatchSize);
b_array.reserve(aBatchSize);
c_array.reserve(aBatchSize);
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
ElemType* cBufPtr = c.Data();
for (size_t i = 0; i < aBatchSize; i++)
{
a_array.push_back(aBufPtr + a.LocateColumn(i));
b_array.push_back(bBufPtr + b.LocateColumn(i));
c_array.push_back(cBufPtr + c.LocateColumn(i));
}
if (sizeof(ElemType) == sizeof(double))
{
std::vector<double> alpha_array(group_size[0], 1.0);
std::vector<double> beta_array(group_size[0], double(beta));
cblas_dgemm_batch(CblasColMajor, &transa_array[0], &transb_array[0], &m_array[0], &n_array[0], &k_array[0], &alpha_array[0],
reinterpret_cast<const double**>(&a_array[0]), &lda_array[0], reinterpret_cast<const double**>(&b_array[0]), &ldb_array[0], &beta_array[0],
reinterpret_cast<double**>(&c_array[0]), &ldc_array[0], 1, &group_size[0]);
}
else
{
std::vector<float> alpha_array(group_size[0], 1.0f);
std::vector<float> beta_array(group_size[0], float(beta));
cblas_sgemm_batch(CblasColMajor, &transa_array[0], &transb_array[0], &m_array[0], &n_array[0], &k_array[0], &alpha_array[0],
reinterpret_cast<const float**>(&a_array[0]), &lda_array[0], reinterpret_cast<const float**>(&b_array[0]), &ldb_array[0], &beta_array[0],
reinterpret_cast<float**>(&c_array[0]), &ldc_array[0], 1, &group_size[0]);
}
#endif
}
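// Layout sketch for BatchMatMul, as implied by the code above: each column of a
// holds one m x k matrix (k x m when transposeA) stored column-major, each
// column of b holds one k x n matrix likewise, and column i of c receives the
// m x n product for batch item i. With USE_OPENBLAS the batch is a plain loop
// of cblas_?gemm calls; otherwise a single cblas_?gemm_batch call with one
// group of size aBatchSize performs the same computation.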
template <class ElemType>
bool CPUMatrix<ElemType>::AreEqual(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const ElemType threshold /*= 1e-8*/)
{
if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols())
return false;
bool result = true;
#pragma omp parallel for
foreach_coord (i, j, a)
{
if (abs(a(i, j) - b(i, j)) > threshold)
{
result = false;
break;
}
}
return result;
}
// see Matrix<ElemType>::TensorShuffleScaleAndAdd() for comments
template <class ElemType>
void CPUMatrix<ElemType>::TensorShuffleScaleAndAdd(ElemType keepWeight, const CPUMatrix<ElemType>& a, size_t D, size_t S, size_t M, size_t K, size_t T, ElemType scaleFactor, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
size_t N = D * S * M * K * T;
const auto pa = a.Data();
const auto pb = b.Data();
auto pc = c.Data();
// Note: This code is written to match a GPU implementation. It is not super-efficient on the CPU.
for (size_t na = 0; na < N; na++) // loop over all elements
{
// recover the 5 indices from the loop counter
size_t d = na % D;
size_t s = (na / D) % S;
size_t m = (na / D / S) % M;
size_t k = (na / D / S / M) % K;
size_t t = (na / D / S / M / K) % T;
// compute index for the a and b/c tensors
assert(na == (((t * K + k) * M + m) * S + s) * D + d); // input tensor of dimension (D x S x M x K x T)
size_t nb = (((t * S + s) * M + m) * K + k) * D + d; // output tensor of dimension (D x K x M x S x T): k/K and s/S swapped
assert(nb < N);
// perform the computation
ElemType cval = keepWeight ? keepWeight * pb[nb] : (ElemType)0; // if weight is 0 then don't bother to read memory (efficiency) or to multiply (NaN-safe)
cval += scaleFactor * pa[na];
pc[nb] = cval;
}
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Ones(const size_t rows, const size_t cols)
{
CPUMatrix<ElemType> c(rows, cols); // will initialize to 0
c.SetValue(1);
return c;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Zeros(const size_t rows, const size_t cols)
{
CPUMatrix<ElemType> c(rows, cols); // will initialize to 0
c.SetValue(0);
return c;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Eye(const size_t rows)
{
CPUMatrix<ElemType> c(rows, rows); // will initialize to 0
c.SetDiagonalValue(1);
return c;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomUniform(const size_t rows, const size_t cols, const ElemType low, const ElemType high, unsigned long seed)
{
CPUMatrix<ElemType> c(rows, cols); // will initialize to 0
c.SetUniformRandomValue(low, high, seed);
return c;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomGaussian(const size_t rows, const size_t cols, const ElemType mean, const ElemType sigma, unsigned long seed)
{
CPUMatrix<ElemType> c(rows, cols); // will initialize to 0
c.SetGaussianRandomValue(mean, sigma, seed);
return c;
}
template <class ElemType>
bool CPUMatrix<ElemType>::HasElement(const CPUMatrix<ElemType>& mat, const ElemType v)
{
bool bHas = false;
bool isvFinite = std::isfinite(v);
#pragma omp parallel for
for (long j = 0; j < mat.GetNumElements(); j++)
{
#pragma omp flush(bHas)
if (!bHas)
{
ElemType cur = mat.Data()[j];
if (isvFinite && std::isfinite(cur))
{
if (cur == v)
bHas = true;
}
else if (std::isnan(v) && std::isnan(cur))
bHas = true;
else if (std::isinf(v) && std::isinf(cur) && std::signbit(v) == std::signbit(cur))
bHas = true;
}
}
return bHas;
}
// CPUMatrix<ElemType>& AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber);
//[this]=a .* b
// here, a and b must be two row vectors of the same size, i.e. [1,m]
// the inputs are two row vectors
// the output is a matrix of size (negnumber+1, cols): row 0 holds the plain element product, and row i (i >= 1) multiplies a(0, j) with b shifted by (shift + i - 1) columns
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignElementProductOfWithShiftNeg: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix dimensions do not match.");
if (a.GetNumRows() != 1)
InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix must be a row vector.");
auto& us = *this;
if (this != &a)
{
RequireSize(negnumber + 1, a.GetNumCols());
// RequireSize(a.GetNumRows(), a.GetNumCols());
}
long m = (long) GetNumRows(), n = (long) GetNumCols(); // a and b are of size (1,n)
// #pragma omp parallel for
for (long j = 0; j < n; j++)
{
us(0, j) = a(0, j) * b(0, j);
}
for (long j = 0; j < n; j++)
{
for (long i = 1; i < m; i++)
{
us(i, j) = a(0, j) * b(0, (j + shift + i - 1) % n);
}
}
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::InnerProductWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise, size_t shift, size_t negnumber)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("InnerProduct: one of the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int k = (int) b.GetNumRows();
const int l = (int) b.GetNumCols();
assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
if (m != k || n != l)
InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");
if ((isColWise && m == 1) || (!isColWise && n == 1)) // in this case it's equivalent to element-wise product
{
InvalidArgument("InnerProduct: Both matrices should be normal ones, not vectors");
// c.AssignElementProductOf(a, b);
}
else if (isColWise) // col-wise
{
c.RequireSize(negnumber + 1, n); // changed: the output has (negnumber + 1) rows rather than 1
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
if (std::is_same<ElemType, double>::value)
{
for (long j = 0; j < n; j++)
{
c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1);
}
for (long j = 0; j < n; j++)
{
for (long i = 1; i < negnumber + 1; i++)
{
c(i, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1);
}
}
}
else if (std::is_same<ElemType, float>::value)
{
for (long j = 0; j < n; j++)
{
c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1);
}
for (long j = 0; j < n; j++)
{
for (long i = 1; i < negnumber + 1; i++)
{
c(i, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1);
}
}
}
else
{
RuntimeError("Unsupported data format");
}
}
else
{
InvalidArgument("InnerProduct: Rowwise is not supported yet");
c.RequireSize(m, 1);
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
if (std::is_same<ElemType, double>::value)
{
#pragma omp parallel for
foreach_row (i, c)
{
c(i, 0) = (ElemType) cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m);
}
}
else if (std::is_same<ElemType, float>::value)
{
#pragma omp parallel for
foreach_row (i, c)
{
#pragma warning(suppress : 4244)
c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m);
}
}
else
{
RuntimeError("Unsupported data format");
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::GetARowByIndex(const CPUMatrix<ElemType>& a, size_t index)
{
if (a.IsEmpty())
LogicError("GetARowByIndex: the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
if (index >= (size_t) m) // index is unsigned, so only the upper bound needs checking
LogicError("GetARowByIndex: the row index is out of range.");
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
auto& us = *this;
RequireSize(1, n);
for (long j = 0; j < n; j++)
{
us(0, j) = a(index, j);
}
return *this;
}
// input: a, a row vector
// input: b, a matrix. b.col == a.col
// input firstmatrixfixed: If true, keep a's order. Otherwise, keep b's order
// output: c, a matrix. c.size == b.size
/*
Example, a = [a1 a2 a3]
b = [b11 b12 b13;
b21 b22 b23 ]
if true:
shift = 1
then c = [a1*b12 a2*b13 a3*b11
a1*b22 a2*b23 a3*b21]
if shift = 2
then c = [ a1*b13 a2*b11 a3*b12
a1*b23 a2*b21 a3*b22]
i.e. we do column-wise shift
if false:
shift = 1
then c = [a2*b11 a3*b12 a1*b13
a2*b21 a3*b22 a1*b23]
shift = 2
then c = [ a3*b11 a1*b12 a2*b13
a3*b21 a1*b22 a2*b23]
*/
template <class ElemType>
void CPUMatrix<ElemType>::ConductRowElementMultiplyWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, size_t shift, bool bFirstmatrixfixed)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("InnerProduct: one of the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int k = (int) b.GetNumRows();
const int l = (int) b.GetNumCols();
assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
if (m != 1 || n != l)
    InvalidArgument("ConductRowElementMultiplyWithShift: a must be a row vector with the same number of columns as b.");
c.RequireSize(k, l); // c must be the same size as b
if (bFirstmatrixfixed)
{
for (long j = 0; j < l; j++)
{
for (long i = 0; i < k; i++)
{
c(i, j) = a(0, j) * b(i, (j + shift) % l);
}
}
}
else
{
for (long j = 0; j < l; j++)
{
for (long i = 0; i < k; i++)
{
c(i, j) = a(0, (j + shift) % l) * b(i, j);
}
}
}
}
// CPUMatrix<ElemType>& AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift);
//[this]=a .* b
// here, a and b must be two row vectors of the same size, i.e. [1,m]. We will do element product with shift.
// inputs are 2 row vectors
// output is a row vector
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift)
{
if (a.IsEmpty() || b.IsEmpty())
    LogicError("AssignElementProductOfWithShift: Matrix is empty.");
if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols())
    InvalidArgument("AssignElementProductOfWithShift: The input matrix dimensions do not match.");
if (a.GetNumRows() != 1)
    InvalidArgument("AssignElementProductOfWithShift: The input matrix must be a row vector.");
auto& us = *this;
if (this != &a)
{
RequireSize(1, a.GetNumCols());
// RequireSize(a.GetNumRows(), a.GetNumCols());
}
// long m = (long)GetNumRows(), n = (long)GetNumCols(); // a and b are of size (1,n)
long n = (long) GetNumCols(); // a and b are of size (1,n)
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
us(0, j) = a(0, j) * b(0, (j + shift) % n);
}
return *this;
}
#pragma endregion Static BLAS Functions
// 'double' version of LogAdd
inline double LogAddD(double x, double y)
{
return LogAdd(x, y);
}
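// For reference: LogAdd computes log(exp(x) + exp(y)) in a numerically stable
// way (conceptually max(x, y) + log(1 + exp(-|x - y|))), so LogSumOfElements
// below accumulates log(sum_k exp(x_k)), starting from the floor value LZERO.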
template <class ElemType>
ElemType CPUMatrix<ElemType>::LogSumOfElements() const
{
ElemType fAlpha = (ElemType) LZERO;
ElemType* bufPtr = Data();
for (int k = 0; k < GetNumElements(); k++)
fAlpha = (ElemType) LogAddD(fAlpha, bufPtr[k]);
return fAlpha;
}
template <class ElemType>
void CPUMatrix<ElemType>::RCRFBackwardCompute(const CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& lbls,
const CPUMatrix<ElemType>& pair_scores)
{
int iNumPos = (int) lbls.GetNumCols();
int iNumLab = (int) lbls.GetNumRows();
int lastLbl = -1;
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
if (lbls(ik, iNumPos - 1) != 0)
{
lastLbl = ik;
break;
}
beta.RequireSize(iNumLab, iNumPos);
for (int t = iNumPos - 1; t >= 0; t--)
{
#pragma omp parallel for
for (int k = 0; k < iNumLab; k++)
{
_rcrfBackwardCompute(t, k, alpha, beta, pair_scores);
}
}
}
// Calculate alpha in forward-backward calculation. equation (6), (7) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// GPU x dimension corresponds to utterances, y dimension corresponds to phone sequence in each utterance
// prob (input): the posterior output from the network
// alpha (output): alpha for forward-backward calculation.
// phoneSeq (input): phone ID sequence for each utterance in this minibatch, each col is one utterance
// phoneBound (input): phone boundary (frame index) of each phone for each utterance in this minibatch, each col is one utterance
// uttToChanInd (input): map from utterance ID to minibatch channel ID. We need this because each channel may contain more than one utterance.
// uttFrameNum (input): the frame number of each utterance. The size of this vector = the number of all utterances in this minibatch
// uttBeginFrame(input): the position of the first frame of each utterance in the minibatch channel. We need this because each channel may contain more than one utterance.
// uttPhoneNum (input): the phone number of each utterance. The size of this vector = the number of all utterances in this minibatch
// numChannels (input): channel number in this minibatch
// uttNum (input): number of utterances
// t (input): time stamp to process
// maxPhoneNum (input): the max number of phones between utterances
// totalPhoneNum (input): the total number of phones of all utterances
// blankTokenId (input): id of the CTC blank token
// delayConstraint -- label output delay constraint introduced during training that allows to have shorter delay during inference.
// Alpha and Beta scores outside of the delay boundary are set to zero.
// Setting this parameter smaller will result in shorted delay between label output during decoding.
// delayConstraint=-1 means no constraint
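// For reference, the recursion implemented below is the log-space form of
// eq. (6)-(7) of the paper cited above: with l_s the label at position s and
// y_t(l_s) the network output for it,
//   alpha_t(s) = y_t(l_s) * ( alpha_{t-1}(s) + alpha_{t-1}(s-1)
//                             [ + alpha_{t-1}(s-2) if l_s != blank and l_s != l_{s-2} ] )
// where products become sums and sums become LogAdd calls in log space.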
template<class ElemType>
void _assignAlphaScore(
const ElemType *prob,
ElemType *alphaScore,
ElemType *phoneSeq,
ElemType *phoneBound,
const std::vector<size_t>& uttToChanInd,
const std::vector<size_t>& uttFrameNum,
const std::vector<size_t>& uttBeginFrame,
const std::vector<size_t>& uttPhoneNum,
size_t numChannels,
const size_t uttNum,
const size_t t,
const size_t maxPhoneNum, // Maximum length of utterance in this MB
const size_t totalPhoneNum, // Total number of phones
const size_t blankTokenId,
const int delayConstraint)
{
for (size_t uttId = 0; uttId < uttNum; uttId++) {
// Number of phones and frames in this utterance
size_t frameNum = uttFrameNum[uttId];
if (t >= frameNum) continue;
size_t phoneNum = uttPhoneNum[uttId];
#pragma omp parallel for
for (int phoneSeqId = 1; phoneSeqId < (int) phoneNum - 1; phoneSeqId++) {
// Index of the label in the sequence
// Current and previous phone indices in phoneSeq matrix
size_t labelid = uttId*maxPhoneNum + phoneSeqId;
// Actual current phone label
size_t phoneId = (size_t)(phoneSeq[labelid]);
// Index of the current frame in minibatch
size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
// Index of probability of observing phoneId at frame timeId
size_t probId = timeId*totalPhoneNum + phoneId;
size_t alphaId = maxPhoneNum* timeId + phoneSeqId; // alpha_t(s)
if (t == 0)
{
// Initialize recursion
if (phoneSeqId == 1 || phoneSeqId == 2)
{
alphaScore[alphaId] = prob[probId];
}
}
else
{
if (phoneSeqId >= 1)
{
size_t timeId_1 = timeId - numChannels; // Index corresponding to (t-1)
size_t alphaId_0 = maxPhoneNum* timeId_1 + phoneSeqId; // alpha_{t-1}(s)
size_t alphaId_1 = alphaId_0 - 1; // alpha_{t-1}(s-1)
size_t alphaId_2 = alphaId_0 - 2; // alpha_{t-1}(s-2)
ElemType x = LZERO;
ElemType ascore;
if (phoneSeqId > 2)
{
size_t labelid_2 = labelid - 2;
// if current label is not blank and not equal prev non-blank label
if ((size_t)(phoneSeq[labelid]) != blankTokenId && phoneId != (size_t)(phoneSeq[labelid_2]))
{
x = LogAdd(x, alphaScore[alphaId_2]);
}
}
if (phoneSeqId > 1)
{
x = LogAdd(x, alphaScore[alphaId_1]);
}
x = LogAdd(x, alphaScore[alphaId_0]);
if (phoneId != SIZE_MAX)
ascore = prob[probId]; // Probability of observing given label at given time
else
ascore = 0;
alphaScore[alphaId] = (ElemType)x + ascore;
if (delayConstraint != -1)
{
size_t labelid_r = labelid + 2;
size_t phoneBoundId_r = (size_t)(phoneBound[labelid_r]);
if (phoneId == blankTokenId)
{
// only constraint right side
if (t > phoneBoundId_r + delayConstraint - 1)
alphaScore[alphaId] = LZERO;
}
else if (phoneId != blankTokenId)
{
if (t > phoneBoundId_r + delayConstraint)
alphaScore[alphaId] = LZERO;
}
}
}
}
}
}
}
// Calculate beta in forward-backward calculation, equation (10), (11) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// See _assignAlphaScore for the explanation of parameters
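// For reference, this is the mirror image of the alpha recursion (eq. (10)-(11)
// of the cited paper), initialized at the last frame of each utterance; in log space,
//   beta_t(s) = y_t(l_s) * ( beta_{t+1}(s) + beta_{t+1}(s+1)
//                            [ + beta_{t+1}(s+2) if l_s != blank and l_s != l_{s+2} ] )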
template<class ElemType>
void _assignBetaScore(
const ElemType *prob,
ElemType *betaScore,
ElemType *phoneSeq,
ElemType *phoneBound,
const std::vector<size_t>& uttToChanInd,
const std::vector<size_t>& uttFrameNum,
const std::vector<size_t>& uttBeginFrame,
const std::vector<size_t>& uttPhoneNum,
const size_t numChannels,
const size_t uttNum,
const long t,
const size_t maxPhoneNum,
const size_t totalPhoneNum,
const size_t blankTokenId,
const int delayConstraint)
{
for (size_t uttId = 0; uttId < uttNum; uttId++) {
// Number of phones and frames in this utterance
size_t frameNum = uttFrameNum[uttId];
if (t >= frameNum) continue;
size_t phoneNum = uttPhoneNum[uttId];
#pragma omp parallel for
for (int phoneSeqId = 1; phoneSeqId < (int) phoneNum - 1; phoneSeqId++) {
size_t labelid = uttId*maxPhoneNum + phoneSeqId;
size_t labelid_2 = labelid + 2;
size_t phoneId = (size_t)(phoneSeq[labelid]);
size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
size_t probId = timeId*totalPhoneNum + phoneId;
size_t betaid = maxPhoneNum* timeId + phoneSeqId;
size_t timeId_1 = timeId + numChannels;
size_t betaid_0 = maxPhoneNum* timeId_1 + phoneSeqId;
size_t betaid_1 = betaid_0 + 1;
size_t betaid_2 = betaid_0 + 2;
if (t == frameNum - 1)
{
if (phoneSeqId == phoneNum - 3 || phoneSeqId == phoneNum - 2)
{
betaScore[betaid] = prob[probId];
}
}
else
{
if (phoneSeqId >= 1)
{
ElemType x = LZERO;
ElemType ascore;
if (phoneSeqId < phoneNum - 3)
{
if ((size_t)(phoneSeq[labelid]) != blankTokenId && phoneId != (size_t)(phoneSeq[labelid_2]))
{
x = LogAdd(x, betaScore[betaid_2]);
}
}
if (phoneSeqId < phoneNum - 2)
{
x = LogAdd(x, betaScore[betaid_1]);
}
x = LogAdd(x, betaScore[betaid_0]);
if (phoneId != SIZE_MAX)
ascore = prob[probId];
else
ascore = 0;
betaScore[betaid] = (ElemType)x + ascore;
if (delayConstraint != -1)
{
size_t phoneBoundId_r = (size_t)(phoneBound[labelid_2]);
if (phoneId == blankTokenId)
{
if (t > phoneBoundId_r + delayConstraint - 1)
betaScore[betaid] = LZERO;
}
else if (phoneId != blankTokenId)
{
if (t > phoneBoundId_r + delayConstraint)
betaScore[betaid] = LZERO;
}
}
}
}
}
}
}
// Calculate CTC score. equation (8) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
template<class ElemType>
void _assignTotalScore(ElemType *betaScore,
std::vector<ElemType>& totalScore,
const size_t uttNum,
const std::vector<size_t>& uttToChanInd,
const std::vector<size_t>& uttBeginFrame,
const size_t numChannels,
const size_t maxPhoneNum)
{
#pragma omp parallel for
    for (int uttId = 0; uttId < (int) uttNum; uttId++) {
        LONG64 alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum;
        betaScore[alphaId_0] = LogAdd(betaScore[alphaId_0 + 1], betaScore[alphaId_0 + 2]);
        totalScore[uttId] = betaScore[alphaId_0];
    }
}
// Calculate derivative, equation (15) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// See _assignAlphaScore for the explanation of parameters
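// For reference: per frame t and sequence position s, the loop below accumulates
// the log occupancy  alpha_t(s) + beta_t(s) - log y_t(l_s) - log p(l|x)
// into the output unit for l_s, then exponentiates it; this is the posterior
// occupancy that appears in the derivative of eq. (15).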
template<class ElemType>
void _assignCTCScore(
ElemType *CTCscore,
ElemType *prob,
ElemType *alphaScore,
ElemType *betaScore,
ElemType *phoneSeq,
const size_t uttNum,
const std::vector<size_t>& uttToChanInd,
const std::vector<size_t>& uttBeginFrame,
const std::vector<size_t>& uttPhoneNum,
const std::vector<size_t>& uttFrameNum,
const size_t numChannels,
const size_t maxPhoneNum,
const size_t totalPhoneNum)
{
for (size_t uttId = 0; uttId < uttNum; uttId++) {
#pragma omp parallel for
for (int t = 0; t < (int) uttFrameNum[uttId]; t++) {
size_t phoneNum = uttPhoneNum[uttId];
size_t alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum;
size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
ElemType P_lx = betaScore[alphaId_0];
for (int s = 1; s < phoneNum - 1; s++)
{
size_t phoneId = (size_t) phoneSeq[uttId*maxPhoneNum + s];
size_t alphaId = maxPhoneNum* timeId + s;
size_t probId = timeId*totalPhoneNum + phoneId;
if (phoneId != SIZE_MAX)
{
ElemType logoccu = alphaScore[alphaId] + betaScore[alphaId] - prob[probId] - (ElemType)P_lx;
CTCscore[probId] = LogAdd(CTCscore[probId], logoccu);
}
}
for (int s = 0; s < totalPhoneNum; s++)
{
size_t probId = timeId*totalPhoneNum + s;
ElemType logoccu = CTCscore[probId];
if (logoccu < LZERO)
CTCscore[probId] = 0.0f;
else
CTCscore[probId] = exp(logoccu);
}
}
}
}
template<class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCTCScore(
const CPUMatrix<ElemType>& prob, CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& phoneSeq, const CPUMatrix<ElemType>& phoneBoundary, CPUMatrix<ElemType> & totalScore, const std::vector<size_t>& uttToChanInd, const std::vector<size_t> & uttBeginFrame, const std::vector<size_t> & uttFrameNum,
const std::vector<size_t> & uttPhoneNum, const size_t numParallelSequences, const size_t maxFrameNum, const size_t blankTokenId, const int delayConstraint, const bool isColWise)
{
// Column wise representation of sequences in input matrices (each column is one sequence/utterance)
if (isColWise)
{
// Total number of phones
size_t totalPhoneNum = prob.GetNumRows();
size_t uttNum = uttFrameNum.size();
// Max number of phones in utterances in this minibatch
size_t maxPhoneNum = phoneSeq.GetNumRows();
for (size_t t = 0; t < maxFrameNum; t++)
{
_assignAlphaScore(prob.Data(), alpha.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd,
uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint);
}
for (LONG64 t = maxFrameNum - 1; t >= 0; t--)
{
_assignBetaScore(prob.Data(), beta.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd,
uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint);
}
std::vector<ElemType> scores(uttNum);
_assignTotalScore(beta.Data(), scores, uttNum, uttToChanInd, uttBeginFrame, numParallelSequences, maxPhoneNum);
_assignCTCScore(Data(), prob.Data(), alpha.Data(), beta.Data(), phoneSeq.Data(), uttNum, uttToChanInd,
uttBeginFrame, uttPhoneNum, uttFrameNum, numParallelSequences, maxPhoneNum, totalPhoneNum);
totalScore(0, 0) = 0.0;
for (size_t utt = 0; utt < uttNum; utt++)
{
totalScore(0,0) -= scores[utt];
}
return *this;
}
else {
LogicError("Only ColWise minibatch layout is supported.");
}
return *this;
}
/// the kernel function for RCRF backward computation
template <class ElemType>
void CPUMatrix<ElemType>::_rcrfBackwardCompute(size_t t, size_t k, const CPUMatrix<ElemType>& alpha,
CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& pair_scores)
{
size_t iNumLab = alpha.GetNumRows();
size_t iNumPos = alpha.GetNumCols();
ElemType fSum;
ElemType fTmp = (ElemType) LZERO;
if (t == iNumPos - 1)
{
fSum = (ElemType) LZERO;
for (int j = 0; j < iNumLab; j++)
{
fSum = (ElemType) LogAddD(fSum, alpha(j, t));
}
fTmp = alpha(k, t) - fSum;
beta(k, t) = fTmp;
}
else
{
for (int j = 0; j < iNumLab; j++)
{
fSum = (ElemType) LZERO;
for (int m = 0; m < iNumLab; m++)
{
fSum = (ElemType) LogAddD(fSum, alpha(m, t) + pair_scores(j, m));
}
fTmp = (ElemType) LogAddD(fTmp, beta(j, t + 1) + alpha(k, t) + pair_scores(j, k) - fSum);
}
beta(k, t) = fTmp;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::RCRFTransGrdCompute(const CPUMatrix<ElemType>& lbls,
const CPUMatrix<ElemType>& alpha,
const CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& pair_scores,
CPUMatrix<ElemType>& grd)
{
int iNumPos = (int) alpha.GetNumCols();
int iNumLab = (int) alpha.GetNumRows();
int firstLbl = -1;
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
if (lbls(ik, 0) != 0)
{
firstLbl = ik;
break;
}
for (size_t tPos = 0; tPos < iNumPos; tPos++)
{
CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1);
CPUMatrix<ElemType> a;
if (tPos > 0)
a = alpha.ColumnSlice(tPos - 1, 1);
#pragma omp parallel for
for (int i = 0; i < iNumLab; i++)
{
_rcrfTransGrdCompute(i, lbls, alpha, beta, pair_scores, grd, tPos);
}
// transition score
int i = -1;
if (tPos == 0)
i = firstLbl;
else
{
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
if (lbls(ik, tPos - 1) != 0)
{
i = ik;
break;
}
}
int j = -1;
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
{
if (lbls(ik, tPos) != 0)
{
j = ik;
break;
}
}
grd(j, i) -= 1.0;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::_rcrfTransGrdCompute(size_t i,
const CPUMatrix<ElemType>& lbls,
const CPUMatrix<ElemType>& alpha,
const CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& pair_scores,
CPUMatrix<ElemType>& grd,
const size_t tPos // position
)
{
int iNumLab = (int) alpha.GetNumRows();
int firstLbl = -1;
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
if (lbls(ik, 0) != 0)
{
firstLbl = ik;
break;
}
CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1);
CPUMatrix<ElemType> a;
if (tPos > 0)
a = alpha.ColumnSlice(tPos - 1, 1);
{
ElemType fTmp = (ElemType) LZERO;
for (int j = 0; j < iNumLab; j++)
{
if (tPos == 0)
{
if (i == firstLbl)
{
fTmp = 0;
}
else
{
fTmp = (ElemType) LZERO;
}
}
else
{
fTmp = a(i, 0);
}
fTmp += pair_scores(j, i);
ElemType fSum = (ElemType) LZERO;
for (int k = 0; k < iNumLab; k++)
{
ElemType fTmp2;
if (tPos == 0)
{
if (k == firstLbl)
{
fTmp2 = 0;
}
else
{
fTmp2 = (ElemType) LZERO;
}
}
else
{
fTmp2 = a(k, 0);
}
fSum = (ElemType) LogAddD(fSum, fTmp2 + pair_scores(j, k));
}
fTmp -= fSum;
fTmp += b(j, 0);
grd(j, i) += exp(fTmp);
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DropFrame(const CPUMatrix<ElemType>& label, const CPUMatrix<ElemType>& gamma, const ElemType& threshhold)
{
auto& us = *this;
if (us.GetNumCols() != gamma.GetNumCols() || us.GetNumRows() != gamma.GetNumRows())
    LogicError("DropFrame: target matrix does not have the same size as the gamma matrix.");
#pragma omp parallel for
foreach_column (j, label)
{
bool dropframe = false;
foreach_row (i, label)
{
if (fabs(label(i, j) - 1.0f) < 0.1)
{
if (gamma(i, j) < threshhold)
dropframe = true;
break;
}
}
        if (dropframe) // zero the column only when its reference frame fell below the threshold
        {
            foreach_row (i, label)
            {
                us(i, j) = 0.0f;
            }
        }
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSequenceError(const ElemType hsmoothingWeight, const CPUMatrix<ElemType>& label,
const CPUMatrix<ElemType>& dnnoutput, const CPUMatrix<ElemType>& gamma, ElemType alpha)
{
auto& us = *this;
foreach_coord (i, j, us)
us(i, j) += alpha * (label(i, j) - (1 - hsmoothingWeight) * dnnoutput(i, j) - hsmoothingWeight * gamma(i, j));
return *this;
}
// note: this function does not depend on the <ElemType> parameter
template <class ElemType>
int CPUMatrix<ElemType>::SetNumThreads(int numThreads)
{
if (numThreads == 0) // use default
return numThreads;
int mthreads = (int) std::thread::hardware_concurrency();
if (numThreads <= 0)
    numThreads = std::max(1, mthreads + numThreads); // non-positive means "all hardware threads minus |numThreads|"
if (numThreads > mthreads)
numThreads = mthreads;
#ifdef _OPENMP
omp_set_num_threads(numThreads);
numThreads = omp_get_max_threads();
#ifdef USE_MKL
mkl_set_num_threads(numThreads);
#elif defined(USE_OPENBLAS)
openblas_set_num_threads(numThreads);
#endif
#endif
return numThreads;
}
template <class ElemType>
int CPUMatrix<ElemType>::GetMaxNumThreads()
{
int numThreads = (int)std::thread::hardware_concurrency();
#ifdef _OPENMP
numThreads = omp_get_max_threads();
#endif
return numThreads;
}
// To ensure Intel MKL calls return the same results on all Intel or Intel compatible CPUs,
// the function set CBWR compatible mode.
template <class ElemType>
void CPUMatrix<ElemType>::SetCompatibleMode()
{
// mkl_cbwr_set not supported in MKLML yet
// Explanation on numeric diff: https://software.intel.com/en-us/articles/introduction-to-the-conditional-numerical-reproducibility-cnr
// #ifdef USE_MKL
// if (mkl_cbwr_set(MKL_CBWR_COMPATIBLE) != MKL_CBWR_SUCCESS)
// RuntimeError("Could not set MKL compatible mode.");
// #endif
}
template <class ElemType>
void CPUMatrix<ElemType>::SetOptimizationFlags(int flags)
{
m_optimizationFlags = flags;
}
template <class ElemType>
int CPUMatrix<ElemType>::GetOptimizationFlags()
{
return m_optimizationFlags;
}
// -----------------------------------------------------------------------
// entry points from Matrix.cpp; calls into CPUMatrixTensorOpImpl
// -----------------------------------------------------------------------
// perform unary operation 'op' on a giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 2>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
CPUMatrixTensorOpImpl<ElemType>(beta, a, *this, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
// perform binary operation 'op' on a and b giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 3>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides)
{
CPUMatrixTensorOpImpl<ElemType>(beta, a, b, *this, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
// perform ternary operation 'op' on a, and c giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 4>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides)
{
CPUMatrixTensorOpImpl<ElemType>(beta, a, b, c, *this, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
template <class ElemType>
int CPUMatrix<ElemType>::Argmin() const
{
int minArg = -1;
ElemType minValue = std::numeric_limits<ElemType>::max();
#pragma omp parallel
{
int localMinArg = -1;
ElemType localMinValue = std::numeric_limits<ElemType>::max();
#pragma omp for
for (int index = 0; index < (int)GetNumElements(); ++index)
{
if (localMinValue > Data()[index])
{
localMinArg = index;
localMinValue = Data()[index];
}
// If we have more than one min value, select the one with the lower index.
else if ((localMinValue == Data()[index]) && (localMinArg > index))
{
localMinArg = index;
}
}
#pragma omp critical
{
if (minValue > localMinValue)
{
minArg = localMinArg;
minValue = localMinValue;
}
// If we have more than one min value, select the one with the lower index.
else if ((minValue == localMinValue) && (minArg > localMinArg))
{
minArg = localMinArg;
}
}
}
return minArg;
}
template <class ElemType>
int CPUMatrix<ElemType>::Argmax() const
{
int maxArg = -1;
ElemType maxValue = std::numeric_limits<ElemType>::lowest();
#pragma omp parallel
{
int localMaxArg = -1;
ElemType localMaxValue = std::numeric_limits<ElemType>::lowest();
#pragma omp for
for (int index = 0; index < (int)GetNumElements(); ++index)
{
if (localMaxValue < Data()[index])
{
localMaxArg = index;
localMaxValue = Data()[index];
}
// If we have more than one max value, select the one with the lower index.
else if ((localMaxValue == Data()[index]) && (localMaxArg > index))
{
localMaxArg = index;
}
}
#pragma omp critical
{
if (maxValue < localMaxValue)
{
maxArg = localMaxArg;
maxValue = localMaxValue;
}
// If we have more than one max value, select the one with the lower index.
else if ((maxValue == localMaxValue) && (maxArg > localMaxArg))
{
maxArg = localMaxArg;
}
}
}
return maxArg;
}
template <class ElemType>
int CPUMatrix<ElemType>::ArgOp(ElementWiseOperator reductionOp) const
{
switch (reductionOp)
{
case ElementWiseOperator::opArgmin:
return Argmin();
break;
case ElementWiseOperator::opArgmax:
return Argmax();
break;
}
InvalidArgument("ArgOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented.");
return -1;
}
template <class ElemType>
void CPUMatrix<ElemType>::TensorArgOp(const CPUMatrix<ElemType>& a, ElementWiseOperator reductionOp,
const array<size_t, 2>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
CPUMatrixTensorArgOpImpl<ElemType>(a, *this, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
template <class ElemType>
void CPUMatrix<ElemType>::ScatterValues(ElemType* indices, ElemType* value, ElemType* data, ElemType alpha, size_t num_indices, size_t rows, size_t cols, size_t indices_step)
{
if (!indices || !value || !data)
LogicError("ScatterValues: input data is null.");
#pragma omp parallel
{
int ithread = omp_get_thread_num();
int nthread = omp_get_num_threads();
for (size_t i = 0; i < num_indices; i++)
{
auto col_r = indices[i * indices_step];
if (std::isnan(col_r) || col_r < 0)
continue;
auto col = (size_t)col_r;
// ignore the elements that are not partitioned into this thread
if (col % nthread != ithread)
continue;
if (col >= cols)
InvalidArgument("ScatterValues: Indices map out of bounds. %ld >= %ld", (long int)col, (long int)cols);
auto index = col * rows;
auto offset = i * rows;
for (size_t j = 0; j < rows; j++)
data[index + j] = data[index + j] + alpha * value[offset + j];
}
}
}
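// Note on the parallelization above: every thread scans all indices but only
// applies updates whose destination column satisfies col % nthread == ithread,
// so each output column is owned by exactly one thread and no two threads can
// race on the same element, at the cost of redundant index scans per thread.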
// We use Matrix<char> as the backing store for QuantizedMatrix
// Let's explicitly instantiate the methods we need for that purpose
template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols);
template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols, char* pArray, const size_t matrixFlags);
template CPUMatrix<char>::CPUMatrix();
template CPUMatrix<char>::CPUMatrix(CPUMatrix<char> const&);
template CPUMatrix<char>::CPUMatrix(CPUMatrix<char>&&);
template size_t CPUMatrix<char>::LocateElement(size_t, size_t) const;
template CPUMatrix<char> CPUMatrix<char>::ColumnSlice(size_t startColumn, size_t numCols) const;
template CPUMatrix<char>& CPUMatrix<char>::operator=(CPUMatrix<char>&&);
template void CPUMatrix<char>::SetValue(const char);
template void CPUMatrix<char>::SetValue(const size_t numRows, const size_t numCols, char* pArray, size_t matrixFlags);
template void CPUMatrix<char>::SetValue(CPUMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(GPUMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(CPUSparseMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(GPUSparseMatrix<char> const&);
template void CPUMatrix<char>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly);
template void CPUMatrix<char>::Resize(const size_t numRows, const size_t numCols, bool growOnly);
template char* CPUMatrix<char>::CopyToArray(void) const;
template void CPUMatrix<char>::CopySection(size_t numRows, size_t numCols, char* dst, size_t colStride) const;
template void CPUMatrix<char>::Reshape(const size_t, const size_t);
// Support <short>
template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols);
template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols, short* pArray, const size_t matrixFlags);
template CPUMatrix<short>::CPUMatrix();
template CPUMatrix<short>::CPUMatrix(CPUMatrix<short> const&);
template CPUMatrix<short>::CPUMatrix(CPUMatrix<short>&&);
template size_t CPUMatrix<short>::LocateElement(size_t, size_t) const;
template CPUMatrix<short> CPUMatrix<short>::ColumnSlice(size_t startColumn, size_t numCols) const;
template CPUMatrix<short>& CPUMatrix<short>::operator=(CPUMatrix<short>&&);
template void CPUMatrix<short>::SetValue(const short);
template void CPUMatrix<short>::SetValue(const size_t numRows, const size_t numCols, short* pArray, size_t matrixFlags);
template void CPUMatrix<short>::SetValue(CPUMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(GPUMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(CPUSparseMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(GPUSparseMatrix<short> const&);
template void CPUMatrix<short>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly);
template void CPUMatrix<short>::Resize(const size_t numRows, const size_t numCols, bool growOnly);
template short* CPUMatrix<short>::CopyToArray(void) const;
template void CPUMatrix<short>::CopySection(size_t numRows, size_t numCols, short* dst, size_t colStride) const;
template void CPUMatrix<short>::Reshape(const size_t, const size_t);
template CPUMatrix<int>::CPUMatrix(const size_t, const size_t, int*, const size_t);
}}}
|
opi.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
int main(int argc, char **argv) {
//seed random number generator
// Q2b: get the number of threads to run with from argv and
// add OpenMP API code to set number of threads here
if (argc < 2) {
  fprintf(stderr, "Usage: %s Nthreads\n", argv[0]);
  return 1;
}
int Nthreads = atoi(argv[1]);
// tells openMP to use Nthreads threads
omp_set_num_threads(Nthreads);
struct drand48_data *drandData;
drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data));
// Q2c: add an OpenMP parallel region here, wherein each thread initializes
// one entry in drandData using srand48_r and seed based on thread number
#pragma omp parallel
{
int rank = omp_get_thread_num(); //thread's rank
long int seed = rank;
srand48_r(seed, drandData+rank);
}
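// Each thread seeded its own drandData[rank] above, so the per-thread
// drand48_r streams used in the main loop below are independent and
// need no synchronization.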
long long int Ntrials = 10000000;
//need running tallies
long long int Ntotal=0;
long long int Ncircle=0;
// start run time
double start = omp_get_wtime();
// Ntotal is incremented by every thread too, so it must also be in the reduction
#pragma omp parallel for reduction(+:Ncircle,Ntotal)
for (long long int n=0; n<Ntrials; n++) {
double rand1;
double rand2;
int rank = omp_get_thread_num();
//generate two random numbers (use the thread id to offset drandData)
drand48_r(drandData+rank, &rand1);
drand48_r(drandData+rank, &rand2);
double x = -1 + 2*rand1; //shift to [-1,1]
double y = -1 + 2*rand2;
//check if it's in the circle (comparing x*x + y*y <= 1 avoids the sqrt)
if (x*x+y*y<=1) Ncircle++;
Ntotal++;
if (n%100 == 0 && n > 0) {
  // inside the parallel region Ncircle is a thread-private partial count, so
  // this is only a rough progress estimate; n > 0 avoids dividing by zero
  double pi = 4.0*Ncircle/ (double) (n);
  printf("Our estimate of pi is %g \n", pi);
}
}
double pi = 4.0*Ncircle/ (double) (Ntotal);
printf("Our final estimate of pi is %g \n", pi);
// stop time
double stop = omp_get_wtime();
// print run time
double runtime = stop - start;
printf("Runtime is %g \n", runtime);
free(drandData);
return 0;
}
|
lagrange.c | #include "lagrange.h"
#include "../../comms.h"
#include "../../shared.h"
#include "hale.h"
#include <float.h>
#include <math.h>
// Performs the Lagrangian step of the hydro solve
void lagrangian_phase(Mesh* mesh, UnstructuredMesh* umesh,
HaleData* hale_data) {
predictor(mesh, umesh, hale_data);
corrector(mesh, umesh, hale_data);
}
// Performs the predictor step of the Lagrangian phase
void predictor(Mesh* mesh, UnstructuredMesh* umesh, HaleData* hale_data) {
// Update the pressure
START_PROFILING(&compute_profile);
equation_of_state(umesh->ncells, hale_data->energy0, hale_data->density0,
hale_data->pressure0);
STOP_PROFILING(&compute_profile, "equation_of_state");
// Calculate the nodal volume and sound speed
START_PROFILING(&compute_profile);
calc_nodal_vol_and_c(
umesh->nnodes, umesh->nodes_to_faces_offsets, umesh->nodes_to_faces,
umesh->faces_to_nodes_offsets, umesh->faces_to_nodes,
umesh->faces_to_cells0, umesh->faces_to_cells1, umesh->nodes_x0,
umesh->nodes_y0, umesh->nodes_z0, umesh->cell_centroids_x,
umesh->cell_centroids_y, umesh->cell_centroids_z, hale_data->energy0,
hale_data->nodal_volumes, hale_data->nodal_soundspeed);
STOP_PROFILING(&compute_profile, "calc_nodal_vol_and_c");
// Sets all of the subcell forces to 0
START_PROFILING(&compute_profile);
zero_subcell_forces(umesh->ncells, umesh->cells_to_nodes_offsets,
hale_data->subcell_force_x, hale_data->subcell_force_y,
hale_data->subcell_force_z);
STOP_PROFILING(&compute_profile, "zero_subcell_forces");
START_PROFILING(&compute_profile);
calc_subcell_force_from_pressure(
umesh->ncells, umesh->cells_to_faces_offsets,
umesh->cells_to_nodes_offsets, umesh->cells_to_faces,
umesh->faces_to_nodes_offsets, umesh->faces_to_nodes,
umesh->cells_to_nodes, umesh->faces_cclockwise_cell, umesh->nodes_x0,
umesh->nodes_y0, umesh->nodes_z0, hale_data->pressure0,
hale_data->subcell_force_x, hale_data->subcell_force_y,
hale_data->subcell_force_z);
STOP_PROFILING(&compute_profile, "calc_subcell_force_from_pressure");
START_PROFILING(&compute_profile);
scale_soundspeed(umesh->nnodes, hale_data->nodal_volumes,
hale_data->nodal_soundspeed);
STOP_PROFILING(&compute_profile, "scale_soundspeed");
START_PROFILING(&compute_profile);
calc_artificial_viscosity(
umesh->ncells, hale_data->visc_coeff1, hale_data->visc_coeff2,
umesh->cells_to_nodes_offsets, umesh->cells_to_nodes,
umesh->faces_cclockwise_cell, umesh->nodes_x0, umesh->nodes_y0,
umesh->nodes_z0, umesh->cell_centroids_x, umesh->cell_centroids_y,
umesh->cell_centroids_z, hale_data->velocity_x0, hale_data->velocity_y0,
hale_data->velocity_z0, hale_data->nodal_soundspeed,
hale_data->nodal_mass, hale_data->nodal_volumes, hale_data->limiter,
hale_data->subcell_force_x, hale_data->subcell_force_y,
hale_data->subcell_force_z, umesh->faces_to_nodes_offsets,
umesh->faces_to_nodes, umesh->cells_to_faces_offsets,
umesh->cells_to_faces);
STOP_PROFILING(&compute_profile, "calc_artificial_viscosity");
START_PROFILING(&compute_profile);
calc_new_velocity(umesh->nnodes, mesh->dt, umesh->nodes_to_cells_offsets,
umesh->nodes_to_cells, umesh->cells_to_nodes_offsets,
umesh->cells_to_nodes, hale_data->subcell_force_x,
hale_data->subcell_force_y, hale_data->subcell_force_z,
hale_data->nodal_mass, hale_data->velocity_x0,
hale_data->velocity_y0, hale_data->velocity_z0,
hale_data->velocity_x1, hale_data->velocity_y1,
hale_data->velocity_z1);
STOP_PROFILING(&compute_profile, "calc_new_velocity");
// TODO: NEED TO WORK OUT HOW TO HANDLE BOUNDARY CONDITIONS REASONABLY
handle_unstructured_reflect_3d(
umesh->nnodes, umesh->boundary_index, umesh->boundary_type,
umesh->boundary_normal_x, umesh->boundary_normal_y,
umesh->boundary_normal_z, hale_data->velocity_x1, hale_data->velocity_y1,
hale_data->velocity_z1);
// Move the nodes by the predicted velocity
START_PROFILING(&compute_profile);
move_nodes(umesh->nnodes, mesh->dt, umesh->nodes_x0, umesh->nodes_y0,
umesh->nodes_z0, hale_data->velocity_x1, hale_data->velocity_y1,
hale_data->velocity_z1, umesh->nodes_x1, umesh->nodes_y1,
umesh->nodes_z1);
STOP_PROFILING(&compute_profile, "move_nodes");
init_cell_centroids(umesh->ncells, umesh->cells_to_nodes_offsets,
umesh->cells_to_nodes, umesh->nodes_x1, umesh->nodes_y1,
umesh->nodes_z1, umesh->cell_centroids_x,
umesh->cell_centroids_y, umesh->cell_centroids_z);
set_timestep(umesh->ncells, umesh->nodes_x1, umesh->nodes_y1, umesh->nodes_z1,
hale_data->energy0, &mesh->dt, umesh->cells_to_faces_offsets,
umesh->cells_to_faces, umesh->faces_to_nodes_offsets,
umesh->faces_to_nodes);
// Calculate the predicted energy
START_PROFILING(&compute_profile);
calc_predicted_energy(umesh->ncells, mesh->dt, umesh->cells_to_nodes_offsets,
umesh->cells_to_nodes, hale_data->velocity_x1,
hale_data->velocity_y1, hale_data->velocity_z1,
hale_data->subcell_force_x, hale_data->subcell_force_y,
hale_data->subcell_force_z, hale_data->energy0,
hale_data->cell_mass, hale_data->energy1);
STOP_PROFILING(&compute_profile, "calc_predicted_energy");
// Using the new volume, calculate the predicted density
START_PROFILING(&compute_profile);
calc_predicted_density(
umesh->ncells, umesh->cells_to_faces_offsets, umesh->cells_to_faces,
umesh->faces_to_nodes_offsets, umesh->faces_to_nodes, umesh->nodes_x1,
umesh->nodes_y1, umesh->nodes_z1, umesh->cell_centroids_x,
umesh->cell_centroids_y, umesh->cell_centroids_z, hale_data->cell_mass,
hale_data->density1);
STOP_PROFILING(&compute_profile, "calc_predicted_density");
// Calculate the time centered pressure from mid point between rezoned and
// predicted pressures
START_PROFILING(&compute_profile);
time_center_pressure(umesh->ncells, hale_data->energy1, hale_data->density1,
hale_data->pressure0, hale_data->pressure1);
STOP_PROFILING(&compute_profile, "time_center_pressure");
// Prepare time centered variables for the corrector step
START_PROFILING(&compute_profile);
time_center_nodes(umesh->nnodes, umesh->nodes_x0, umesh->nodes_y0,
umesh->nodes_z0, umesh->nodes_x1, umesh->nodes_y1,
umesh->nodes_z1);
STOP_PROFILING(&compute_profile, "time_center_nodes");
}
// Performs the corrector step of the Lagrangian phase
void corrector(Mesh* mesh, UnstructuredMesh* umesh, HaleData* hale_data) {
// Sets all of the subcell forces to 0
START_PROFILING(&compute_profile);
zero_subcell_forces(umesh->ncells, umesh->cells_to_nodes_offsets,
hale_data->subcell_force_x, hale_data->subcell_force_y,
hale_data->subcell_force_z);
STOP_PROFILING(&compute_profile, "calc_nodal_mass_vol");
// Calculate the nodal mass
START_PROFILING(&compute_profile);
calc_nodal_vol_and_c(
umesh->nnodes, umesh->nodes_to_faces_offsets, umesh->nodes_to_faces,
umesh->faces_to_nodes_offsets, umesh->faces_to_nodes,
umesh->faces_to_cells0, umesh->faces_to_cells1, umesh->nodes_x1,
umesh->nodes_y1, umesh->nodes_z1, umesh->cell_centroids_x,
umesh->cell_centroids_y, umesh->cell_centroids_z, hale_data->energy1,
hale_data->nodal_volumes, hale_data->nodal_soundspeed);
STOP_PROFILING(&compute_profile, "calc_nodal_vol_and_c");
START_PROFILING(&compute_profile);
scale_soundspeed(umesh->nnodes, hale_data->nodal_volumes,
hale_data->nodal_soundspeed);
STOP_PROFILING(&compute_profile, "scale_soundspeed");
// Calculate the pressure gradients
START_PROFILING(&compute_profile);
calc_subcell_force_from_pressure(
umesh->ncells, umesh->cells_to_faces_offsets,
umesh->cells_to_nodes_offsets, umesh->cells_to_faces,
umesh->faces_to_nodes_offsets, umesh->faces_to_nodes,
umesh->cells_to_nodes, umesh->faces_cclockwise_cell, umesh->nodes_x1,
umesh->nodes_y1, umesh->nodes_z1, hale_data->pressure1,
hale_data->subcell_force_x, hale_data->subcell_force_y,
hale_data->subcell_force_z);
STOP_PROFILING(&compute_profile, "node_force_from_pressure");
calc_artificial_viscosity(
umesh->ncells, hale_data->visc_coeff1, hale_data->visc_coeff2,
umesh->cells_to_nodes_offsets, umesh->cells_to_nodes,
umesh->faces_cclockwise_cell, umesh->nodes_x1, umesh->nodes_y1,
umesh->nodes_z1, umesh->cell_centroids_x, umesh->cell_centroids_y,
umesh->cell_centroids_z, hale_data->velocity_x1, hale_data->velocity_y1,
hale_data->velocity_z1, hale_data->nodal_soundspeed,
hale_data->nodal_mass, hale_data->nodal_volumes, hale_data->limiter,
hale_data->subcell_force_x, hale_data->subcell_force_y,
hale_data->subcell_force_z, umesh->faces_to_nodes_offsets,
umesh->faces_to_nodes, umesh->cells_to_faces_offsets,
umesh->cells_to_faces);
START_PROFILING(&compute_profile);
// Updates and time center velocity in the corrector step
update_and_time_center_velocity(
umesh->nnodes, mesh->dt, umesh->nodes_to_cells_offsets,
umesh->nodes_to_cells, umesh->cells_to_nodes_offsets,
umesh->cells_to_nodes, hale_data->nodal_mass, hale_data->subcell_force_x,
hale_data->subcell_force_y, hale_data->subcell_force_z,
hale_data->velocity_x0, hale_data->velocity_y0, hale_data->velocity_z0,
hale_data->velocity_x1, hale_data->velocity_y1, hale_data->velocity_z1);
STOP_PROFILING(&compute_profile, "calc_new_velocity");
handle_unstructured_reflect_3d(
umesh->nnodes, umesh->boundary_index, umesh->boundary_type,
umesh->boundary_normal_x, umesh->boundary_normal_y,
umesh->boundary_normal_z, hale_data->velocity_x0, hale_data->velocity_y0,
hale_data->velocity_z0);
// Advances the nodes using the corrected velocity
START_PROFILING(&compute_profile);
advance_nodes_corrected(umesh->nnodes, mesh->dt, hale_data->velocity_x0,
hale_data->velocity_y0, hale_data->velocity_z0,
umesh->nodes_x0, umesh->nodes_y0, umesh->nodes_z0);
STOP_PROFILING(&compute_profile, "advance_nodes_corrected");
set_timestep(umesh->ncells, umesh->nodes_x0, umesh->nodes_y0, umesh->nodes_z0,
hale_data->energy1, &mesh->dt, umesh->cells_to_faces_offsets,
umesh->cells_to_faces, umesh->faces_to_nodes_offsets,
umesh->faces_to_nodes);
// Calculate the corrected energy
START_PROFILING(&compute_profile);
calc_corrected_energy(umesh->ncells, mesh->dt, umesh->cells_to_nodes_offsets,
umesh->cells_to_nodes, hale_data->velocity_x0,
hale_data->velocity_y0, hale_data->velocity_z0,
hale_data->subcell_force_x, hale_data->subcell_force_y,
hale_data->subcell_force_z, hale_data->cell_mass,
hale_data->energy0);
STOP_PROFILING(&compute_profile, "calc_corrected_energy");
init_cell_centroids(umesh->ncells, umesh->cells_to_nodes_offsets,
umesh->cells_to_nodes, umesh->nodes_x0, umesh->nodes_y0,
umesh->nodes_z0, umesh->cell_centroids_x,
umesh->cell_centroids_y, umesh->cell_centroids_z);
// Using the new corrected volume, calculate the density
START_PROFILING(&compute_profile);
calc_corrected_density(
umesh->ncells, umesh->cells_to_faces_offsets, umesh->cells_to_faces,
umesh->faces_to_nodes_offsets, umesh->faces_to_nodes, umesh->nodes_x0,
umesh->nodes_y0, umesh->nodes_z0, umesh->cell_centroids_x,
umesh->cell_centroids_y, umesh->cell_centroids_z, hale_data->cell_mass,
hale_data->cell_volume, hale_data->density0);
STOP_PROFILING(&compute_profile, "calc_corrected_density");
}
// A simple ideal gas equation of state
void equation_of_state(const int ncells, const double* energy,
const double* density, double* pressure) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
pressure[(cc)] = (GAM - 1.0) * energy[(cc)] * density[(cc)];
}
}
// Calculates the nodal volume and sound speed
void calc_nodal_vol_and_c(const int nnodes, const int* nodes_to_faces_offsets,
const int* nodes_to_faces,
const int* faces_to_nodes_offsets,
const int* faces_to_nodes, const int* faces_to_cells0,
const int* faces_to_cells1, const double* nodes_x,
const double* nodes_y, const double* nodes_z,
const double* cell_centroids_x,
const double* cell_centroids_y,
const double* cell_centroids_z, const double* energy,
double* nodal_volumes, double* nodal_soundspeed) {
#pragma omp parallel for
for (int nn = 0; nn < nnodes; ++nn) {
const int node_to_faces_off = nodes_to_faces_offsets[(nn)];
const int nfaces_by_node =
nodes_to_faces_offsets[(nn + 1)] - node_to_faces_off;
nodal_volumes[(nn)] = 0.0;
nodal_soundspeed[(nn)] = 0.0;
// Consider all faces attached to node
for (int ff = 0; ff < nfaces_by_node; ++ff) {
const int face_index = nodes_to_faces[(node_to_faces_off + ff)];
if (face_index == -1) {
continue;
}
// Determine the offset into the list of nodes
const int face_to_nodes_off = faces_to_nodes_offsets[(face_index)];
const int nnodes_by_face =
faces_to_nodes_offsets[(face_index + 1)] - face_to_nodes_off;
// Find node center and location of current node on face
vec_t face_c = {0.0, 0.0, 0.0};
int node_in_face_c = 0; // index of nn within the face's node list, set in the loop below
for (int nn2 = 0; nn2 < nnodes_by_face; ++nn2) {
const int node_index = faces_to_nodes[(face_to_nodes_off + nn2)];
face_c.x += nodes_x[(node_index)] / nnodes_by_face;
face_c.y += nodes_y[(node_index)] / nnodes_by_face;
face_c.z += nodes_z[(node_index)] / nnodes_by_face;
// Choose the node in the list of nodes attached to the face
if (nn == node_index) {
node_in_face_c = nn2;
}
}
// Fetch the nodes attached to our current node on the current face
int local_nodes[2];
local_nodes[0] =
(node_in_face_c - 1 >= 0)
? faces_to_nodes[(face_to_nodes_off + node_in_face_c - 1)]
: faces_to_nodes[(face_to_nodes_off + nnodes_by_face - 1)];
local_nodes[1] =
(node_in_face_c + 1 < nnodes_by_face)
? faces_to_nodes[(face_to_nodes_off + node_in_face_c + 1)]
: faces_to_nodes[(face_to_nodes_off)];
// Fetch the cells attached to our current face
int local_cells[2];
local_cells[0] = faces_to_cells0[(face_index)];
local_cells[1] = faces_to_cells1[(face_index)];
// Add contributions from both of the cells attached to the face
for (int cc = 0; cc < 2; ++cc) {
const int cell_index = local_cells[(cc)];
if (cell_index == -1) {
continue;
}
// Add contributions for both edges attached to our current node
for (int nn2 = 0; nn2 < 2; ++nn2) {
const double subsubcell_vol = calc_subsubcell_volume(
cell_index, local_nodes[(nn2)], nn, face_c, nodes_x, nodes_y,
nodes_z, cell_centroids_x, cell_centroids_y, cell_centroids_z);
nodal_soundspeed[(nn)] +=
sqrt(GAM * (GAM - 1.0) * energy[(cell_index)]) * subsubcell_vol;
nodal_volumes[(nn)] += subsubcell_vol;
}
}
}
}
}
// Calculates the volume of a subsubcell
double calc_subsubcell_volume(const int cc, const int rnode_index,
const int node_index, vec_t face_c,
const double* nodes_x, const double* nodes_y,
const double* nodes_z,
const double* cell_centroids_x,
const double* cell_centroids_y,
const double* cell_centroids_z) {
// Construct the vectors describing an edge tetrahedron
const vec_t ad = {(face_c.x - nodes_x[(node_index)]),
(face_c.y - nodes_y[(node_index)]),
(face_c.z - nodes_z[(node_index)])};
const vec_t bd = {nodes_x[(rnode_index)] - nodes_x[(node_index)],
nodes_y[(rnode_index)] - nodes_y[(node_index)],
nodes_z[(rnode_index)] - nodes_z[(node_index)]};
const vec_t cd = {cell_centroids_x[(cc)] - nodes_x[(node_index)],
cell_centroids_y[(cc)] - nodes_y[(node_index)],
cell_centroids_z[(cc)] - nodes_z[(node_index)]};
// Fetch the area vector of one of the faces of the tetrahedron
const vec_t area = {0.5 * (ad.y * bd.z - ad.z * bd.y),
-0.5 * (ad.x * bd.z - ad.z * bd.x),
0.5 * (ad.x * bd.y - ad.y * bd.x)};
// Determine the volume using standard irregular tetrahedron formula
const double edge_subcell_vol =
fabs(cd.x * area.x + cd.y * area.y + cd.z * area.z) / 3.0;
// The subsubcell is half the volume of the edge subcell
return 0.5 * edge_subcell_vol;
}
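// (Added worked example for calc_subsubcell_volume above, not in the
// original source) For edge vectors ad = (1,0,0), bd = (0,1,0) and
// cd = (0,0,1), the area vector is (0,0,0.5) and fabs(cd . area) / 3.0
// = 1/6, the volume of the unit right tetrahedron; the function then
// returns half of that, 1/12.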
// Sets all of the subcell forces to 0
void zero_subcell_forces(const int ncells, const int* cells_to_nodes_offsets,
double* subcell_force_x, double* subcell_force_y,
double* subcell_force_z) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
const int nnodes_by_cell =
cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;
for (int nn = 0; nn < nnodes_by_cell; ++nn) {
const int subcell_index = cell_to_nodes_off + nn;
subcell_force_x[(subcell_index)] = 0.0;
subcell_force_y[(subcell_index)] = 0.0;
subcell_force_z[(subcell_index)] = 0.0;
}
}
}
// Calculate the subcell force from pressure gradients
void calc_subcell_force_from_pressure(
const int ncells, const int* cells_to_faces_offsets,
const int* cells_to_nodes_offsets, const int* cells_to_faces,
const int* faces_to_nodes_offsets, const int* faces_to_nodes,
const int* cells_to_nodes, const int* faces_cclockwise_cell,
const double* nodes_x, const double* nodes_y, const double* nodes_z,
const double* pressure, double* subcell_force_x, double* subcell_force_y,
double* subcell_force_z) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_faces_off = cells_to_faces_offsets[(cc)];
const int nfaces_by_cell =
cells_to_faces_offsets[(cc + 1)] - cell_to_faces_off;
const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
const int nnodes_by_cell =
cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;
// Look at all of the faces attached to the cell
for (int ff = 0; ff < nfaces_by_cell; ++ff) {
const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
const int face_to_nodes_off = faces_to_nodes_offsets[(face_index)];
const int nnodes_by_face =
faces_to_nodes_offsets[(face_index + 1)] - face_to_nodes_off;
// Calculate the face center... SHOULD WE PRECOMPUTE?
vec_t face_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_face, nodes_x, nodes_y, nodes_z, faces_to_nodes,
face_to_nodes_off, &face_c);
// Now we will sum the contributions at each of the nodes
// TODO: THERE IS SOME SYMMETRY HERE THAT MEANS WE MIGHT BE ABLE TO
// OPTIMISE
for (int nn2 = 0; nn2 < nnodes_by_face; ++nn2) {
// Fetch the nodes attached to our current node on the current face
const int node_index = faces_to_nodes[(face_to_nodes_off + nn2)];
const int face_clockwise = (faces_cclockwise_cell[(face_index)] != cc);
const int next_node = (nn2 == nnodes_by_face - 1) ? 0 : nn2 + 1;
const int prev_node = (nn2 == 0) ? nnodes_by_face - 1 : nn2 - 1;
const int rnode_off = (face_clockwise ? prev_node : next_node);
const int rnode_index = faces_to_nodes[(face_to_nodes_off + rnode_off)];
// Get the halfway point on the right edge
vec_t half_edge = {
0.5 * (nodes_x[(node_index)] + nodes_x[(rnode_index)]),
0.5 * (nodes_y[(node_index)] + nodes_y[(rnode_index)]),
0.5 * (nodes_z[(node_index)] + nodes_z[(rnode_index)])};
// Setup basis on plane of tetrahedron
vec_t a = {(nodes_x[(node_index)] - half_edge.x),
(nodes_y[(node_index)] - half_edge.y),
(nodes_z[(node_index)] - half_edge.z)};
vec_t b = {(face_c.x - half_edge.x), (face_c.y - half_edge.y),
(face_c.z - half_edge.z)};
// Calculate the area vector A using cross product
vec_t A = {0.5 * (a.y * b.z - a.z * b.y),
-0.5 * (a.x * b.z - a.z * b.x),
0.5 * (a.x * b.y - a.y * b.x)};
int subcell_index;
int rsubcell_index;
for (int nn3 = 0; nn3 < nnodes_by_cell; ++nn3) {
if (cells_to_nodes[(cell_to_nodes_off + nn3)] == node_index) {
subcell_index = cell_to_nodes_off + nn3;
} else if (cells_to_nodes[(cell_to_nodes_off + nn3)] == rnode_index) {
rsubcell_index = cell_to_nodes_off + nn3;
}
}
subcell_force_x[(subcell_index)] += pressure[(cc)] * A.x;
subcell_force_y[(subcell_index)] += pressure[(cc)] * A.y;
subcell_force_z[(subcell_index)] += pressure[(cc)] * A.z;
subcell_force_x[(rsubcell_index)] += pressure[(cc)] * A.x;
subcell_force_y[(rsubcell_index)] += pressure[(cc)] * A.y;
subcell_force_z[(rsubcell_index)] += pressure[(cc)] * A.z;
}
}
}
}
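// (Added note) Each iteration above applies the same force p * A to both
// subcells sharing the edge; A is the area vector of the triangle spanned
// by the node, the edge midpoint and the face centroid. For a planar face
// these contributions sum to p times the full face area vector, and over a
// closed cell surface to zero, so a uniform-pressure cell stays in
// equilibrium (my reading of the scheme, added for clarity).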
// Scale the soundspeed by the inverse of the nodal volume
void scale_soundspeed(const int nnodes, const double* nodal_volumes,
double* nodal_soundspeed) {
#pragma omp parallel for
for (int nn = 0; nn < nnodes; ++nn) {
nodal_soundspeed[(nn)] /= nodal_volumes[(nn)];
}
}
// Calculate the time-centered evolved velocities by calculating the
// predicted values at the new timestep and averaging with the current
// velocity
void calc_new_velocity(const int nnodes, const double dt,
const int* nodes_to_cells_offsets,
const int* nodes_to_cells,
const int* cells_to_nodes_offsets,
const int* cells_to_nodes, const double* subcell_force_x,
const double* subcell_force_y,
const double* subcell_force_z, const double* nodal_mass,
const double* velocity_x0, const double* velocity_y0,
const double* velocity_z0, double* velocity_x1,
double* velocity_y1, double* velocity_z1) {
#pragma omp parallel for simd
for (int nn = 0; nn < nnodes; ++nn) {
const int node_to_cells_off = nodes_to_cells_offsets[(nn)];
const int ncells_by_node =
nodes_to_cells_offsets[(nn + 1)] - node_to_cells_off;
// Accumulate the force at this node
vec_t node_force = {0.0, 0.0, 0.0};
for (int cc = 0; cc < ncells_by_node; ++cc) {
const int cell_index = nodes_to_cells[(node_to_cells_off + cc)];
const int cell_to_nodes_off = cells_to_nodes_offsets[(cell_index)];
const int nnodes_by_cell =
cells_to_nodes_offsets[(cell_index + 1)] - cell_to_nodes_off;
// ARRGHHHH: linear search for this node's subcell index within the cell
int nn2;
for (nn2 = 0; nn2 < nnodes_by_cell; ++nn2) {
if (cells_to_nodes[(cell_to_nodes_off + nn2)] == nn) {
break;
}
}
const int subcell_index = cell_to_nodes_off + nn2;
node_force.x += subcell_force_x[(subcell_index)];
node_force.y += subcell_force_y[(subcell_index)];
node_force.z += subcell_force_z[(subcell_index)];
}
// Determine the predicted velocity
velocity_x1[(nn)] =
velocity_x0[(nn)] + dt * node_force.x / nodal_mass[(nn)];
velocity_y1[(nn)] =
velocity_y0[(nn)] + dt * node_force.y / nodal_mass[(nn)];
velocity_z1[(nn)] =
velocity_z0[(nn)] + dt * node_force.z / nodal_mass[(nn)];
// Calculate the time centered velocity
velocity_x1[(nn)] = 0.5 * (velocity_x0[(nn)] + velocity_x1[(nn)]);
velocity_y1[(nn)] = 0.5 * (velocity_y0[(nn)] + velocity_y1[(nn)]);
velocity_z1[(nn)] = 0.5 * (velocity_z0[(nn)] + velocity_z1[(nn)]);
}
}
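// (Added worked example for calc_new_velocity above, not in the original
// source) With dt = 0.1, velocity_x0 = 1.0 and node_force.x / nodal_mass
// = 2.0, the predicted velocity is 1.0 + 0.1 * 2.0 = 1.2 and the stored
// time-centered value is 0.5 * (1.0 + 1.2) = 1.1.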
// Moves the nodes to the next time level
void move_nodes(const int nnodes, const double dt, const double* nodes_x0,
const double* nodes_y0, const double* nodes_z0,
const double* velocity_x1, const double* velocity_y1,
const double* velocity_z1, double* nodes_x1, double* nodes_y1,
double* nodes_z1) {
#pragma omp parallel for simd
for (int nn = 0; nn < nnodes; ++nn) {
nodes_x1[(nn)] = nodes_x0[(nn)] + dt * velocity_x1[(nn)];
nodes_y1[(nn)] = nodes_y0[(nn)] + dt * velocity_y1[(nn)];
nodes_z1[(nn)] = nodes_z0[(nn)] + dt * velocity_z1[(nn)];
}
}
// Calculates the predicted density from the new cell volumes
void calc_predicted_density(const int ncells, const int* cells_to_faces_offsets,
const int* cells_to_faces,
const int* faces_to_nodes_offsets,
const int* faces_to_nodes, const double* nodes_x1,
const double* nodes_y1, const double* nodes_z1,
const double* cell_centroids_x,
const double* cell_centroids_y,
const double* cell_centroids_z,
const double* cell_mass, double* density1) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_faces_off = cells_to_faces_offsets[(cc)];
const int nfaces_by_cell =
cells_to_faces_offsets[(cc + 1)] - cell_to_faces_off;
const double cell_volume = calc_cell_volume(
cc, nfaces_by_cell, cell_to_faces_off, cells_to_faces,
faces_to_nodes_offsets, faces_to_nodes, nodes_x1, nodes_y1, nodes_z1,
cell_centroids_x, cell_centroids_y, cell_centroids_z);
density1[(cc)] = cell_mass[(cc)] / cell_volume;
}
}
// Time centers the pressure
void time_center_pressure(const int ncells, const double* energy1,
const double* density1, const double* pressure0,
double* pressure1) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
// Calculate the predicted pressure from the equation of state
pressure1[(cc)] = (GAM - 1.0) * energy1[(cc)] * density1[(cc)];
// Determine the time centered pressure
pressure1[(cc)] = 0.5 * (pressure0[(cc)] + pressure1[(cc)]);
}
}
// Time centers the nodal positions
void time_center_nodes(const int nnodes, const double* nodes_x0,
const double* nodes_y0, const double* nodes_z0,
double* nodes_x1, double* nodes_y1, double* nodes_z1) {
#pragma omp parallel for
for (int nn = 0; nn < nnodes; ++nn) {
nodes_x1[(nn)] = 0.5 * (nodes_x1[(nn)] + nodes_x0[(nn)]);
nodes_y1[(nn)] = 0.5 * (nodes_y1[(nn)] + nodes_y0[(nn)]);
nodes_z1[(nn)] = 0.5 * (nodes_z1[(nn)] + nodes_z0[(nn)]);
}
}
// Updates and time-centers the velocity in the corrector step
void update_and_time_center_velocity(
const int nnodes, const double dt, const int* nodes_to_cells_offsets,
const int* nodes_to_cells, const int* cells_to_nodes_offsets,
const int* cells_to_nodes, const double* nodal_mass,
const double* subcell_force_x, const double* subcell_force_y,
const double* subcell_force_z, double* velocity_x0, double* velocity_y0,
double* velocity_z0, double* velocity_x1, double* velocity_y1,
double* velocity_z1) {
#pragma omp parallel for simd
for (int nn = 0; nn < nnodes; ++nn) {
const int node_to_cells_off = nodes_to_cells_offsets[(nn)];
const int ncells_by_node =
nodes_to_cells_offsets[(nn + 1)] - node_to_cells_off;
// Consider all faces attached to node
vec_t node_force = {0.0, 0.0, 0.0};
for (int cc = 0; cc < ncells_by_node; ++cc) {
const int cell_index = nodes_to_cells[(node_to_cells_off + cc)];
const int cell_to_nodes_off = cells_to_nodes_offsets[(cell_index)];
const int nnodes_by_cell =
cells_to_nodes_offsets[(cell_index + 1)] - cell_to_nodes_off;
int nn2;
for (nn2 = 0; nn2 < nnodes_by_cell; ++nn2) {
if (cells_to_nodes[(cell_to_nodes_off + nn2)] == nn) {
break;
}
}
node_force.x += subcell_force_x[(cell_to_nodes_off + nn2)];
node_force.y += subcell_force_y[(cell_to_nodes_off + nn2)];
node_force.z += subcell_force_z[(cell_to_nodes_off + nn2)];
}
// TODO: Do we actually need to update the velocities back here??
// Calculate the new velocities
velocity_x1[(nn)] += dt * node_force.x / nodal_mass[(nn)];
velocity_y1[(nn)] += dt * node_force.y / nodal_mass[(nn)];
velocity_z1[(nn)] += dt * node_force.z / nodal_mass[(nn)];
// Calculate the corrected time centered velocities
velocity_x0[(nn)] = 0.5 * (velocity_x1[(nn)] + velocity_x0[(nn)]);
velocity_y0[(nn)] = 0.5 * (velocity_y1[(nn)] + velocity_y0[(nn)]);
velocity_z0[(nn)] = 0.5 * (velocity_z1[(nn)] + velocity_z0[(nn)]);
}
}
// Advances the nodes using the corrected velocity
void advance_nodes_corrected(const int nnodes, const double dt,
const double* velocity_x0,
const double* velocity_y0,
const double* velocity_z0, double* nodes_x0,
double* nodes_y0, double* nodes_z0) {
#pragma omp parallel for
for (int nn = 0; nn < nnodes; ++nn) {
nodes_x0[(nn)] += dt * velocity_x0[(nn)];
nodes_y0[(nn)] += dt * velocity_y0[(nn)];
nodes_z0[(nn)] += dt * velocity_z0[(nn)];
}
}
// Calculate the new energy based on the subcell forces
void calc_predicted_energy(const int ncells, const double dt,
const int* cells_to_nodes_offsets,
const int* cells_to_nodes, const double* velocity_x1,
const double* velocity_y1, const double* velocity_z1,
const double* subcell_force_x,
const double* subcell_force_y,
const double* subcell_force_z, const double* energy0,
const double* cell_mass, double* energy1) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
const int nnodes_by_cell =
cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;
double cell_force = 0.0;
for (int nn = 0; nn < nnodes_by_cell; ++nn) {
const int node_index = cells_to_nodes[(cell_to_nodes_off + nn)];
const int subcell_index = cell_to_nodes_off + nn;
cell_force +=
(velocity_x1[(node_index)] * subcell_force_x[(subcell_index)] +
velocity_y1[(node_index)] * subcell_force_y[(subcell_index)] +
velocity_z1[(node_index)] * subcell_force_z[(subcell_index)]);
}
energy1[(cc)] = energy0[(cc)] - dt * cell_force / cell_mass[(cc)];
}
}
// Calculates the corrected energy from the subcell forces and velocity
void calc_corrected_energy(const int ncells, const double dt,
const int* cells_to_nodes_offsets,
const int* cells_to_nodes, const double* velocity_x0,
const double* velocity_y0, const double* velocity_z0,
const double* subcell_force_x,
const double* subcell_force_y,
const double* subcell_force_z,
const double* cell_mass, double* energy0) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
const int nnodes_by_cell =
cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;
double cell_force = 0.0;
for (int nn = 0; nn < nnodes_by_cell; ++nn) {
const int node_index = cells_to_nodes[(cell_to_nodes_off + nn)];
const int subcell_index = cell_to_nodes_off + nn;
cell_force +=
(velocity_x0[(node_index)] * subcell_force_x[(subcell_index)] +
velocity_y0[(node_index)] * subcell_force_y[(subcell_index)] +
velocity_z0[(node_index)] * subcell_force_z[(subcell_index)]);
}
energy0[(cc)] -= dt * cell_force / cell_mass[(cc)];
}
}
// Calculates the density from the corrected volume
void calc_corrected_density(
const int ncells, const int* cells_to_faces_offsets,
const int* cells_to_faces, const int* faces_to_nodes_offsets,
const int* faces_to_nodes, const double* nodes_x, const double* nodes_y,
const double* nodes_z, const double* cell_centroids_x,
const double* cell_centroids_y, const double* cell_centroids_z,
const double* cell_mass, double* cell_volume, double* density) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_faces_off = cells_to_faces_offsets[(cc)];
const int nfaces_by_cell =
cells_to_faces_offsets[(cc + 1)] - cell_to_faces_off;
cell_volume[(cc)] = calc_cell_volume(
cc, nfaces_by_cell, cell_to_faces_off, cells_to_faces,
faces_to_nodes_offsets, faces_to_nodes, nodes_x, nodes_y, nodes_z,
cell_centroids_x, cell_centroids_y, cell_centroids_z);
// Update the density using the new volume
density[(cc)] = cell_mass[(cc)] / cell_volume[(cc)];
}
}
// Calculates the volume in a cell by tetrahedral decomposition
double calc_cell_volume(const int cc, const int nfaces_by_cell,
const int cell_to_faces_off, const int* cells_to_faces,
const int* faces_to_nodes_offsets,
const int* faces_to_nodes, const double* nodes_x,
const double* nodes_y, const double* nodes_z,
const double* cell_centroids_x,
const double* cell_centroids_y,
const double* cell_centroids_z) {
double cell_vol = 0.0;
// Look at all of the faces attached to the cell
for (int ff = 0; ff < nfaces_by_cell; ++ff) {
const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
const int face_to_nodes_off = faces_to_nodes_offsets[(face_index)];
const int nnodes_by_face =
faces_to_nodes_offsets[(face_index + 1)] - face_to_nodes_off;
vec_t face_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_face, nodes_x, nodes_y, nodes_z, faces_to_nodes,
face_to_nodes_off, &face_c);
// Now we will sum the contributions at each of the nodes
for (int nn2 = 0; nn2 < nnodes_by_face; ++nn2) {
// Fetch the nodes attached to our current node on the current face
const int node_index = faces_to_nodes[(face_to_nodes_off + nn2)];
const int rnode_index =
(nn2 + 1 < nnodes_by_face)
? faces_to_nodes[(face_to_nodes_off + nn2 + 1)]
: faces_to_nodes[(face_to_nodes_off)];
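// (Added comment) calc_subsubcell_volume returns half an edge
// tetrahedron, so the factor of two below restores the full volume of
// each tetrahedron in the decomposition.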
cell_vol += 2.0 * calc_subsubcell_volume(
cc, rnode_index, node_index, face_c, nodes_x,
nodes_y, nodes_z, cell_centroids_x,
cell_centroids_y, cell_centroids_z);
}
}
return cell_vol;
}
// Controls the timestep for the simulation
void set_timestep(const int ncells, const double* nodes_x,
const double* nodes_y, const double* nodes_z,
const double* energy, double* dt, int* cells_to_faces_offsets,
int* cells_to_faces, int* faces_to_nodes_offsets,
int* faces_to_nodes) {
// Calculate the timestep based on the computational mesh and CFL
// condition
double local_dt = DBL_MAX;
START_PROFILING(&compute_profile);
#pragma omp parallel for reduction(min : local_dt)
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_faces_off = cells_to_faces_offsets[(cc)];
const int nfaces_by_cell =
cells_to_faces_offsets[(cc + 1)] - cell_to_faces_off;
double shortest_edge = DBL_MAX;
// Look at all of the faces attached to the cell
for (int ff = 0; ff < nfaces_by_cell; ++ff) {
const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
const int face_to_nodes_off = faces_to_nodes_offsets[(face_index)];
const int nnodes_by_face =
faces_to_nodes_offsets[(face_index + 1)] - face_to_nodes_off;
for (int nn = 0; nn < nnodes_by_face; ++nn) {
// Fetch the nodes attached to our current node on the current face
const int node_index = faces_to_nodes[(face_to_nodes_off + nn)];
const int rnode_index =
(nn + 1 < nnodes_by_face)
? faces_to_nodes[(face_to_nodes_off + nn + 1)]
: faces_to_nodes[(face_to_nodes_off)];
const double x_component =
nodes_x[(node_index)] - nodes_x[(rnode_index)];
const double y_component =
nodes_y[(node_index)] - nodes_y[(rnode_index)];
const double z_component =
nodes_z[(node_index)] - nodes_z[(rnode_index)];
// Find the shortest edge of this cell
shortest_edge = min(shortest_edge, sqrt(x_component * x_component +
y_component * y_component +
z_component * z_component));
}
}
const double soundspeed = sqrt(GAM * (GAM - 1.0) * energy[(cc)]);
local_dt = min(local_dt, shortest_edge / soundspeed);
}
STOP_PROFILING(&compute_profile, __func__);
*dt = CFL * local_dt;
printf("Timestep %.8fs\n", *dt);
}
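// (Added worked example for set_timestep above, not in the original
// source) With GAM = 1.4 and energy = 2.5 the soundspeed is
// sqrt(1.4 * 0.4 * 2.5) ~= 1.183, so a shortest edge of 0.1 yields a
// candidate local_dt of about 0.0845, which is then scaled by CFL.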
// Calculates the artificial viscous forces for momentum acceleration
void calc_artificial_viscosity(
const int ncells, const double visc_coeff1, const double visc_coeff2,
const int* cells_to_nodes_offsets, const int* cells_to_nodes,
const int* faces_cclockwise_cell, const double* nodes_x,
const double* nodes_y, const double* nodes_z,
const double* cell_centroids_x, const double* cell_centroids_y,
const double* cell_centroids_z, const double* velocity_x,
const double* velocity_y, const double* velocity_z,
const double* nodal_soundspeed, const double* nodal_mass,
const double* nodal_volumes, const double* limiter, double* subcell_force_x,
double* subcell_force_y, double* subcell_force_z,
int* faces_to_nodes_offsets, int* faces_to_nodes,
int* cells_to_faces_offsets, int* cells_to_faces) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_faces_off = cells_to_faces_offsets[(cc)];
const int nfaces_by_cell =
cells_to_faces_offsets[(cc + 1)] - cell_to_faces_off;
const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
const int nnodes_by_cell =
cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;
// Look at all of the faces attached to the cell
for (int ff = 0; ff < nfaces_by_cell; ++ff) {
const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
const int face_to_nodes_off = faces_to_nodes_offsets[(face_index)];
const int nnodes_by_face =
faces_to_nodes_offsets[(face_index + 1)] - face_to_nodes_off;
vec_t face_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_face, nodes_x, nodes_y, nodes_z, faces_to_nodes,
face_to_nodes_off, &face_c);
// Now we will sum the contributions at each of the nodes
for (int nn2 = 0; nn2 < nnodes_by_face; ++nn2) {
const int node_index = faces_to_nodes[(face_to_nodes_off + nn2)];
const int face_clockwise = (faces_cclockwise_cell[(face_index)] != cc);
const int next_node = (nn2 == nnodes_by_face - 1) ? 0 : nn2 + 1;
const int prev_node = (nn2 == 0) ? nnodes_by_face - 1 : nn2 - 1;
const int rnode_off = (face_clockwise ? prev_node : next_node);
const int rnode_index = faces_to_nodes[(face_to_nodes_off + rnode_off)];
// Get the halfway point on the right edge
vec_t half_edge = {
0.5 * (nodes_x[(node_index)] + nodes_x[(rnode_index)]),
0.5 * (nodes_y[(node_index)] + nodes_y[(rnode_index)]),
0.5 * (nodes_z[(node_index)] + nodes_z[(rnode_index)])};
// Setup basis on plane of tetrahedron
vec_t a = {(cell_centroids_x[(cc)] - face_c.x),
(cell_centroids_y[(cc)] - face_c.y),
(cell_centroids_z[(cc)] - face_c.z)};
vec_t b = {(half_edge.x - face_c.x), (half_edge.y - face_c.y),
(half_edge.z - face_c.z)};
vec_t S = {0.5 * (a.y * b.z - a.z * b.y),
-0.5 * (a.x * b.z - a.z * b.x),
0.5 * (a.x * b.y - a.y * b.x)};
// Calculate the velocity gradients
vec_t dvel = {velocity_x[(node_index)] - velocity_x[(rnode_index)],
velocity_y[(node_index)] - velocity_y[(rnode_index)],
velocity_z[(node_index)] - velocity_z[(rnode_index)]};
const double dvel_mag =
sqrt(dvel.x * dvel.x + dvel.y * dvel.y + dvel.z * dvel.z);
// Calculate the unit vectors of the velocity gradients
vec_t dvel_unit = {(dvel_mag != 0.0) ? dvel.x / dvel_mag : 0.0,
(dvel_mag != 0.0) ? dvel.y / dvel_mag : 0.0,
(dvel_mag != 0.0) ? dvel.z / dvel_mag : 0.0};
// Get the edge-centered density
double nodal_density =
nodal_mass[(node_index)] / nodal_volumes[(node_index)];
double rnodal_density =
nodal_mass[(rnode_index)] / nodal_volumes[(rnode_index)];
const double density_edge = (2.0 * nodal_density * rnodal_density) /
(nodal_density + rnodal_density);
// Calculate the artificial viscous force term for the edge
double expansion_term = (dvel.x * S.x + dvel.y * S.y + dvel.z * S.z);
// If the cell is compressing, calculate the edge forces and add
// their contributions to the node forces
if (expansion_term <= 0.0) {
// Calculate the minimum soundspeed
const double cs = min(nodal_soundspeed[(node_index)],
nodal_soundspeed[(rnode_index)]);
const double t = 0.25 * (GAM + 1.0);
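// (Added note) With t = (GAM + 1) / 4, the force below combines a
// quadratic term in the velocity jump with a linear soundspeed term;
// this matches the edge-centered artificial viscosity of Caramana,
// Shashkov and Whalen as I read it. Treat the attribution as an
// annotation, not a claim by the original authors.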
const double edge_visc_force_x =
density_edge *
(visc_coeff2 * t * fabs(dvel.x) +
sqrt(visc_coeff2 * visc_coeff2 * t * t * dvel.x * dvel.x +
visc_coeff1 * visc_coeff1 * cs * cs)) *
(1.0 - limiter[(node_index)]) * expansion_term * dvel_unit.x;
const double edge_visc_force_y =
density_edge *
(visc_coeff2 * t * fabs(dvel.y) +
sqrt(visc_coeff2 * visc_coeff2 * t * t * dvel.y * dvel.y +
visc_coeff1 * visc_coeff1 * cs * cs)) *
(1.0 - limiter[(node_index)]) * expansion_term * dvel_unit.y;
const double edge_visc_force_z =
density_edge *
(visc_coeff2 * t * fabs(dvel.z) +
sqrt(visc_coeff2 * visc_coeff2 * t * t * dvel.z * dvel.z +
visc_coeff1 * visc_coeff1 * cs * cs)) *
(1.0 - limiter[(node_index)]) * expansion_term * dvel_unit.z;
int subcell_index;
int rsubcell_index;
for (int nn3 = 0; nn3 < nnodes_by_cell; ++nn3) {
if (cells_to_nodes[(cell_to_nodes_off + nn3)] == node_index) {
subcell_index = cell_to_nodes_off + nn3;
} else if (cells_to_nodes[(cell_to_nodes_off + nn3)] ==
rnode_index) {
rsubcell_index = cell_to_nodes_off + nn3;
}
}
// Add the contributions of the edge-based artificial viscous terms
// to the main force terms
subcell_force_x[(subcell_index)] += edge_visc_force_x;
subcell_force_y[(subcell_index)] += edge_visc_force_y;
subcell_force_z[(subcell_index)] += edge_visc_force_z;
subcell_force_x[(rsubcell_index)] -= edge_visc_force_x;
subcell_force_y[(rsubcell_index)] -= edge_visc_force_y;
subcell_force_z[(rsubcell_index)] -= edge_visc_force_z;
}
}
}
}
}
|
GB_binop__land_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_int8
// A.*B function (eWiseMult): GB_AemultB__land_int8
// A*D function (colscale): GB_AxD__land_int8
// D*A function (rowscale): GB_DxB__land_int8
// C+=B function (dense accum): GB_Cdense_accumB__land_int8
// C+=b function (dense accum): GB_Cdense_accumb__land_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_int8
// C=scalar+B GB_bind1st__land_int8
// C=scalar+B' GB_bind1st_tran__land_int8
// C=A+scalar GB_bind2nd__land_int8
// C=A'+scalar GB_bind2nd_tran__land_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
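// (Added example) Any nonzero int8_t counts as true: aij = 3, bij = -2
// gives cij = 1, while aij = 3, bij = 0 gives cij = 0.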
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = ((x != 0) && (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT8 || GxB_NO_LAND_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__land_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__land_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__land_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__land_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__land_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__land_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__land_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__land_int8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t bij = Bx [p] ;
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__land_int8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB_bind1st_tran__land_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB_bind2nd_tran__land_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
search.h | // -*- C++ -*-
// Copyright (C) 2007, 2008 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 2, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this library; see the file COPYING. If not, write to
// the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
// MA 02111-1307, USA.
// As a special exception, you may use this file as part of a free
// software library without restriction. Specifically, if other files
// instantiate templates or use macros or inline functions from this
// file, or you compile this file and link it with other files to
// produce an executable, this file does not by itself cause the
// resulting executable to be covered by the GNU General Public
// License. This exception does not however invalidate any other
// reasons why the executable file might be covered by the GNU General
// Public License.
/** @file parallel/search.h
* @brief Parallel implementation base for std::search() and
* std::search_n().
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_SEARCH_H
#define _GLIBCXX_PARALLEL_SEARCH_H 1
#include <bits/stl_algobase.h>
#include <parallel/parallel.h>
#include <parallel/equally_split.h>
namespace __gnu_parallel
{
/**
* @brief Precalculate advances for Knuth-Morris-Pratt algorithm.
* @param elements Begin iterator of sequence to search for.
* @param length Length of sequence to search for.
 * @param off Returned offsets (the KMP border table).
*/
template<typename RandomAccessIterator, typename _DifferenceTp>
void
calc_borders(RandomAccessIterator elements, _DifferenceTp length,
_DifferenceTp* off)
{
typedef _DifferenceTp difference_type;
off[0] = -1;
if (length > 1)
off[1] = 0;
difference_type k = 0;
for (difference_type j = 2; j <= length; j++)
{
while ((k >= 0) && !(elements[k] == elements[j-1]))
k = off[k];
off[j] = ++k;
}
}
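  // (Added example, not in the original source.) For the pattern "abab"
  // this routine fills off = { -1, 0, 0, 1, 2 }: off[j] is the length of
  // the longest proper border of the first j pattern characters, i.e. the
  // standard Knuth-Morris-Pratt failure function used for the safe jumps
  // in search_template() below.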
// Generic parallel find algorithm (requires random access iterator).
/** @brief Parallel std::search.
* @param begin1 Begin iterator of first sequence.
* @param end1 End iterator of first sequence.
* @param begin2 Begin iterator of second sequence.
* @param end2 End iterator of second sequence.
* @param pred Find predicate.
* @return Place of finding in first sequences. */
template<typename _RandomAccessIterator1,
typename _RandomAccessIterator2,
typename Pred>
_RandomAccessIterator1
search_template(_RandomAccessIterator1 begin1, _RandomAccessIterator1 end1,
_RandomAccessIterator2 begin2, _RandomAccessIterator2 end2,
Pred pred)
{
typedef std::iterator_traits<_RandomAccessIterator1> traits_type;
typedef typename traits_type::difference_type difference_type;
_GLIBCXX_CALL((end1 - begin1) + (end2 - begin2));
difference_type pattern_length = end2 - begin2;
// Pattern too short.
if(pattern_length <= 0)
return end1;
// Last point to start search.
difference_type input_length = (end1 - begin1) - pattern_length;
// Where is the first occurrence of the pattern? Defaults to the end.
difference_type result = (end1 - begin1);
difference_type *splitters;
// Pattern too long.
if (input_length < 0)
return end1;
omp_lock_t result_lock;
omp_init_lock(&result_lock);
thread_index_t num_threads =
std::max<difference_type>(1,
std::min<difference_type>(input_length, get_max_threads()));
difference_type advances[pattern_length];
calc_borders(begin2, pattern_length, advances);
# pragma omp parallel num_threads(num_threads)
{
# pragma omp single
{
num_threads = omp_get_num_threads();
splitters = new difference_type[num_threads + 1];
equally_split(input_length, num_threads, splitters);
}
thread_index_t iam = omp_get_thread_num();
difference_type start = splitters[iam], stop = splitters[iam + 1];
difference_type pos_in_pattern = 0;
bool found_pattern = false;
while (start <= stop && !found_pattern)
{
// Get new value of result.
#pragma omp flush(result)
// No chance for this thread to find first occurrence.
if (result < start)
break;
while (pred(begin1[start + pos_in_pattern],
begin2[pos_in_pattern]))
{
++pos_in_pattern;
if (pos_in_pattern == pattern_length)
{
// Found new candidate for result.
omp_set_lock(&result_lock);
result = std::min(result, start);
omp_unset_lock(&result_lock);
found_pattern = true;
break;
}
}
// Make safe jump.
start += (pos_in_pattern - advances[pos_in_pattern]);
pos_in_pattern =
(advances[pos_in_pattern] < 0) ? 0 : advances[pos_in_pattern];
}
} //parallel
omp_destroy_lock(&result_lock);
delete[] splitters;
// Return iterator on found element.
return (begin1 + result);
}
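  // (Added usage sketch.) The parallel-mode std::search dispatch
  // presumably invokes this as
  //   search_template(begin1, end1, begin2, end2,
  //                   __gnu_parallel::equal_to<value_type1, value_type2>());
  // treat the exact functor name as an assumption; any binary predicate
  // matching the Pred signature works the same way.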
} // end namespace
#endif
|
pst_fmt_plug.c | /* PST cracker patch for JtR. Hacked together during July of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>
*
* Optimizations and shift to pkzip CRC32 code done by JimF
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Uses code from crc32_fmt_plug.c written by JimF */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pst;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pst);
#else
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "crc32.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 16384 // core i7 no HT
#endif
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define FORMAT_LABEL "PST"
#define FORMAT_NAME "custom CRC-32"
#define FORMAT_TAG "$pst$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 8
#define BINARY_SIZE 4
#define SALT_SIZE 0
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 256
static struct fmt_tests tests[] = {
{"$pst$a9290513", "openwall"}, /* "jfuck jw" works too ;) */
{"$pst$50e099bc", "password"},
{"$pst$00000000", ""},
{"$pst$e3da3318", "xxx"},
{"$pst$a655dd18", "XYz123"},
{"$pst$29b14070", "thisisalongstring"},
{"$pst$25b44615", "string with space"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out);
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p;
int extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
p = ciphertext + FORMAT_TAG_LEN;
if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra)
return 0;
return 1;
}
static void set_key(char *key, int index) {
strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1);
}
static int cmp_all(void *binary, int count)
{
uint32_t crc=*((uint32_t*)binary), i;
for (i = 0; i < count; ++i)
if (crc == crypt_out[i]) return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return *((uint32_t*)binary) == crypt_out[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int i;
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
for (i = 0; i < count; ++i) {
CRC32_t crc = 0;
unsigned char *p = (unsigned char*)saved_key[i];
while (*p)
crc = jtr_crc32(crc, *p++);
crypt_out[i] = crc;
}
return count;
}
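/* (Added walk-through, not in the original source.) For the empty key ""
 * the while loop above never runs, so crc stays 0 and the computed hash is
 * "$pst$00000000", matching the third self-test vector in tests[]. */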
static void *get_binary(char *ciphertext)
{
static uint32_t *out;
if (!out)
out = mem_alloc_tiny(sizeof(uint32_t), MEM_ALIGN_WORD);
sscanf(&ciphertext[5], "%x", out);
return out;
}
static char *get_key(int index)
{
return saved_key[index];
}
static int get_hash_0(int index) { return crypt_out[index] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index] & PH_MASK_6; }
struct fmt_main fmt_pst = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_TRUNC | FMT_8_BIT | FMT_NOT_EXACT,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_binop__le_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int16)
// A*D function (colscale): GB (_AxD__le_int16)
// D*A function (rowscale): GB (_DxB__le_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__le_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__le_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int16)
// C=scalar+B GB (_bind1st__le_int16)
// C=scalar+B' GB (_bind1st_tran__le_int16)
// C=A+scalar GB (_bind2nd__le_int16)
// C=A'+scalar GB (_bind2nd_tran__le_int16)
// C type: bool
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
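// (Added example) GB_BINOP(z, 3, 5, i, j) yields z = (3 <= 5) = true; the
// index arguments i and j are accepted but unused by this operator.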
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT16 || GxB_NO_LE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__le_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__le_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__le_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__le_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__le_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__le_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__le_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__le_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__le_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__le_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__le_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
contact_residualbased_elimination_builder_and_solver_with_constraints.h | //    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
//
#if !defined(KRATOS_CONTACT_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS )
#define KRATOS_CONTACT_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS
/* System includes */
#include <unordered_set>
#include <unordered_map>
/* External includes */
/* Project includes */
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_with_constraints.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ContactResidualBasedEliminationBuilderAndSolverWithConstraints
* @ingroup ContactStructuralMechanicsApplication
* @brief Current class provides an implementation for contact builder and solving operations. (elimination)
* @details The RHS is constituted by the unbalanced loads (residual). Degrees of freedom are reordered by putting the restrained degrees of freedom at the end of the system, ordered in reverse with respect to the DofSet; inactive degrees of freedom are not considered. Imposition of the Dirichlet conditions is naturally dealt with as the residual already contains this information. Calculation of the reactions involves a cost very similar to the calculation of the total residual.
* @author Vicente Mataix Ferrandiz
* @tparam TSparseSpace The sparse matrix system considered
* @tparam TDenseSpace The dense matrix system
* @tparam TLinearSolver The type of linear solver considered
*/
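// A construction sketch (illustrative; the space and solver typedefs are
// assumptions, not defined in this header):
//   typedef ContactResidualBasedEliminationBuilderAndSolverWithConstraints<
//       SparseSpaceType, LocalSpaceType, LinearSolverType> BuilderAndSolverType;
//   auto p_builder_and_solver = Kratos::make_shared<BuilderAndSolverType>(p_linear_solver);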
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ContactResidualBasedEliminationBuilderAndSolverWithConstraints
: public ResidualBasedEliminationBuilderAndSolverWithConstraints< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ContactResidualBasedEliminationBuilderAndSolverWithConstraints
KRATOS_CLASS_POINTER_DEFINITION(ContactResidualBasedEliminationBuilderAndSolverWithConstraints);
/// Definitions dependent of the base class
typedef ResidualBasedEliminationBuilderAndSolverWithConstraints< TSparseSpace, TDenseSpace, TLinearSolver > BaseType;
/// Base types definitions
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodeType NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
/// General containers type definitions
typedef ModelPart::MasterSlaveConstraintContainerType ConstraintContainerType;
/// Additional definitions
typedef typename BaseType::ElementsContainerType ElementsContainerType;
typedef typename BaseType::EquationIdVectorType EquationIdVectorType;
typedef typename BaseType::DofsVectorType DofsVectorType;
/// DoF types definition
typedef typename BaseType::DofType DofType;
typedef typename BaseType::DofPointerType DofPointerType;
/// The DoF pointer vector type definition
typedef std::vector<typename DofType::Pointer> DofPointerVectorType;
/// The size type
typedef std::size_t SizeType;
/// The index type
typedef std::size_t IndexType;
/// Index set definition
typedef std::unordered_set<IndexType> IndexSetType;
///@}
///@name Enum's
///@{
///@}
///@name Life Cycle
///@{
/** Constructor.
*/
ContactResidualBasedEliminationBuilderAndSolverWithConstraints(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BaseType(pNewLinearSystemSolver)
{
}
/** Destructor.
*/
~ContactResidualBasedEliminationBuilderAndSolverWithConstraints() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief It organises the dofset in order to speed up the building phase
* @param rModelPart The model part to compute
*/
void SetUpSystem(
ModelPart& rModelPart
) override
{
if(rModelPart.MasterSlaveConstraints().size() > 0)
SetUpSystemWithConstraints(rModelPart);
else
BaseSetUpSystem(rModelPart);
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" each element
* and condition for its Dofs.
* @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
) override
{
if(rModelPart.MasterSlaveConstraints().size() > 0)
SetUpDofSetWithConstraints(pScheme, rModelPart);
else
BaseType::SetUpDofSet(pScheme, rModelPart);
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element and condition its Dofs.
* @details Equivalent to the ResidualBasedEliminationBuilderAndSolver but with constraints. The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSetWithConstraints(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
)
{
KRATOS_TRY;
// We are going to enforce the existence of constraints for LM for each displacement dof
if (rModelPart.NodesBegin()->SolutionStepsDataHas(VECTOR_LAGRANGE_MULTIPLIER)) {
// Reorder constraints
IndexType constraint_id = 1;
for (auto& r_constraint : rModelPart.MasterSlaveConstraints()) {
r_constraint.SetId(constraint_id);
++constraint_id;
}
// Auxiliary dof lists
DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used for constraints, to include master/slave relations
// Contributions to the system
LocalSystemMatrixType transformation_matrix = LocalSystemMatrixType(0, 0);
LocalSystemVectorType constant_vector = LocalSystemVectorType(0);
// Reference constraint
const auto& r_clone_constraint = KratosComponents<MasterSlaveConstraint>::Get("LinearMasterSlaveConstraint");
#pragma omp parallel firstprivate(transformation_matrix, constant_vector, dof_list, second_dof_list)
{
// Current process info
ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// A buffer to store auxiliary constraints
ConstraintContainerType constraints_buffer;
// Gets the array of constraints from the modeler
auto& r_constraints_array = rModelPart.MasterSlaveConstraints();
const int number_of_constraints = static_cast<int>(r_constraints_array.size());
#pragma omp for schedule(guided, 512)
for (int i = 0; i < number_of_constraints; ++i) {
auto it_const = r_constraints_array.begin() + i;
// Gets the list of DoFs involved in each constraint
it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info);
DofPointerVectorType slave_dofs, master_dofs;
bool create_lm_constraint = false;
// We check if we have SLAVE nodes in the master dofs
bool slave_nodes_master_dof = false;
// Master DoFs
for (auto& p_dof : second_dof_list) {
if (IsDisplacementDof(*p_dof)) {
const IndexType node_id = p_dof->Id();
auto pnode = rModelPart.pGetNode(node_id);
if (pnode->Is(SLAVE)) { // The nodes computing contact are the slave nodes
slave_nodes_master_dof = true;
break;
}
}
}
// Slave DoFs
for (auto& p_dof : dof_list) {
if (IsDisplacementDof(*p_dof)) {
const IndexType node_id = p_dof->Id();
const auto& r_variable = p_dof->GetVariable();
auto pnode = rModelPart.pGetNode(node_id);
if (pnode->IsNot(INTERFACE) || slave_nodes_master_dof) { // Nodes from the contact interface cannot be slave DoFs
if (r_variable == DISPLACEMENT_X) {
slave_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_X));
} else if (r_variable == DISPLACEMENT_Y) {
slave_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_Y));
} else if (r_variable == DISPLACEMENT_Z) {
slave_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_Z));
}
} else { // We remove it
it_const->Set(TO_ERASE);
}
}
}
// Master DoFs
if (slave_nodes_master_dof) { // The nodes computing contact are the slave nodes
for (auto& p_dof : second_dof_list) {
if (IsDisplacementDof(*p_dof)) {
const IndexType node_id = p_dof->Id();
const auto& r_variable = p_dof->GetVariable();
auto pnode = rModelPart.pGetNode(node_id);
if (r_variable == DISPLACEMENT_X) {
master_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_X));
} else if (r_variable == DISPLACEMENT_Y) {
master_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_Y));
} else if (r_variable == DISPLACEMENT_Z) {
master_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_Z));
}
}
}
}
// We check if we create constraints
if ((slave_dofs.size() == dof_list.size()) &&
(master_dofs.size() == second_dof_list.size())) {
create_lm_constraint = true;
}
// We create the new constraint
if (create_lm_constraint) {
auto p_constraint = r_clone_constraint.Create(constraint_id + i + 1, master_dofs, slave_dofs, transformation_matrix, constant_vector);
(constraints_buffer).insert((constraints_buffer).begin(), p_constraint);
}
}
// We transfer
#pragma omp critical
{
rModelPart.AddMasterSlaveConstraints(constraints_buffer.begin(),constraints_buffer.end());
}
}
}
// We remove the marked constraints
rModelPart.RemoveMasterSlaveConstraintsFromAllLevels(TO_ERASE);
KRATOS_INFO_IF("ContactResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() > 0)) <<
"Model part after creating new constraints" << rModelPart << std::endl;
// Calling base SetUpDofSetWithConstraints
BaseType::SetUpDofSetWithConstraints(pScheme, rModelPart);
KRATOS_CATCH("");
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief This method computes the equivalent counterpart of the SetUpSystem when using constraints
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystemWithConstraints(ModelPart& rModelPart)
{
KRATOS_TRY
// First we set up the system of equations without constraints
BaseSetUpSystem(rModelPart);
// Add the computation of the global ids of the solvable dofs
IndexType counter = 0;
for (auto& dof : BaseType::mDofSet) {
if (dof.EquationId() < BaseType::mEquationSystemSize) {
auto it = BaseType::mDoFSlaveSet.find(dof);
if (it == BaseType::mDoFSlaveSet.end()) {
++counter;
}
}
}
// The total system of equations to be solved
BaseType::mDoFToSolveSystemSize = counter;
KRATOS_CATCH("ContactResidualBasedEliminationBuilderAndSolverWithConstraints::FormulateGlobalMasterSlaveRelations failed ..");
}
/**
* @brief It organises the dofset in order to speed up the building phase (base one)
* @param rModelPart The model part to compute
*/
void BaseSetUpSystem(ModelPart& rModelPart)
{
/**
* Same as the non-contact version, except that if we fix the displacement of a slave node we must also fix the corresponding LM for consistency
*/
// We create a set of dofs of the displacement slave dofs with LM associated
std::unordered_map<IndexType, IndexSetType> set_nodes_with_lm_associated;
if (rModelPart.HasSubModelPart("Contact"))
set_nodes_with_lm_associated.reserve(rModelPart.GetSubModelPart("Contact").NumberOfNodes());
// Allocating auxiliary parameters
IndexType node_id;
// We start the dof loop
for (auto& i_dof : BaseType::mDofSet) {
node_id = i_dof.Id();
if (IsLMDof(i_dof))
set_nodes_with_lm_associated.insert({node_id, IndexSetType({})});
}
// Auxiliary keys
const IndexType key_lm_x = VECTOR_LAGRANGE_MULTIPLIER_X.Key();
const IndexType key_lm_y = VECTOR_LAGRANGE_MULTIPLIER_Y.Key();
const IndexType key_lm_z = VECTOR_LAGRANGE_MULTIPLIER_Z.Key();
// We record which LM components must be fixed for each node
for (auto& i_dof : BaseType::mDofSet) {
node_id = i_dof.Id();
auto it = set_nodes_with_lm_associated.find(node_id);
if ( it != set_nodes_with_lm_associated.end()) {
if (i_dof.IsFixed()) {
const auto& r_variable = i_dof.GetVariable();
auto& aux_set = (it->second);
if (r_variable == DISPLACEMENT_X) {
aux_set.insert(key_lm_x);
} else if (r_variable == DISPLACEMENT_Y) {
aux_set.insert(key_lm_y);
} else if (r_variable == DISPLACEMENT_Z) {
aux_set.insert(key_lm_z);
}
}
}
}
// We do now the loop over the dofs
for (auto& i_dof : BaseType::mDofSet) {
if (i_dof.IsFree()) {
node_id = i_dof.Id();
auto it = set_nodes_with_lm_associated.find(node_id);
if (it != set_nodes_with_lm_associated.end()) {
auto& aux_set = it->second;
if (aux_set.find((i_dof.GetVariable()).Key()) != aux_set.end()) {
i_dof.FixDof();
}
}
}
}
BaseType::SetUpSystem(rModelPart);
}
/**
* @brief Checks if the degree of freedom belongs to a displacement DoF
* @param rDoF The degree of freedom
* @return True if the DoF corresponds with a displacement dof
*/
static inline bool IsDisplacementDof(const DofType& rDoF)
{
const auto& r_variable = rDoF.GetVariable();
if (r_variable == DISPLACEMENT_X ||
r_variable == DISPLACEMENT_Y ||
r_variable == DISPLACEMENT_Z) {
return true;
}
return false;
}
/**
* @brief Checks if the degree of freedom belongs to a LM DoF
* @param rDoF The degree of freedom
* @return True if the DoF corresponds with a LM dof
*/
static inline bool IsLMDof(const DofType& rDoF)
{
const auto& r_variable = rDoF.GetVariable();
if (r_variable == VECTOR_LAGRANGE_MULTIPLIER_X ||
r_variable == VECTOR_LAGRANGE_MULTIPLIER_Y ||
r_variable == VECTOR_LAGRANGE_MULTIPLIER_Z) {
return true;
}
return false;
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ContactResidualBasedEliminationBuilderAndSolverWithConstraints */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_CONTACT_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS defined */
|
opencl_tc_fmt_plug.c | /*
* TrueCrypt volume OpenCL support to John The Ripper (RIPEMD-160 only)
*
* Based on CPU format originally written by Alain Espinosa <alainesp at
* gmail.com> in 2012.
* Copyright (c) 2015, magnum
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if HAVE_OPENCL
#define FMT_STRUCT fmt_ocl_tc
#if FMT_EXTERNS_H
extern struct fmt_main FMT_STRUCT;
#elif FMT_REGISTERS_H
john_register_one(&FMT_STRUCT);
#else
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "options.h"
#include "formats.h"
#include "crc32.h"
#include "johnswap.h"
#include "aes.h"
#include "pbkdf2_hmac_ripemd160.h"
#include "loader.h"
#include "common-opencl.h"
#define FORMAT_LABEL "truecrypt-opencl"
#define FORMAT_NAME "TrueCrypt AES256_XTS"
#define ALGORITHM_NAME "RIPEMD160 OpenCL"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
/* 64 is the actual maximum used by Truecrypt software as of version 7.1a */
#define PLAINTEXT_LENGTH 64
#define MAX_CIPHERTEXT_LENGTH (512*2+32)
#define SALT_SIZE sizeof(struct cust_salt)
#define SALT_ALIGN 4
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define TAG_RIPEMD160 "truecrypt_RIPEMD_160$"
#define TAG_RIPEMD160_LEN (sizeof(TAG_RIPEMD160)-1)
#define IS_RIPEMD160 2
#define MAX_PASSSZ 64
#define PASS_BUFSZ 256
#define KPOOL_SZ 64
#define MAX_KFILE_SZ 1048576 /* 1 MB */
#define MAX_KEYFILES 256
static unsigned char (*first_block_dec)[16];
unsigned char (*keyfiles_data)[MAX_KFILE_SZ];
int (*keyfiles_length);
#define KEYLEN PLAINTEXT_LENGTH
#define OUTLEN 64
#define SALTLEN 64
typedef struct {
unsigned int length;
unsigned char v[KEYLEN];
} pbkdf2_password;
typedef struct {
unsigned int v[(OUTLEN+3)/4];
} pbkdf2_hash;
typedef struct {
unsigned char salt[SALTLEN];
} pbkdf2_salt;
struct cust_salt {
unsigned char salt[64];
unsigned char bin[512-64];
int loop_inc;
int num_iterations;
int hash_type;
int nkeyfiles;
} *psalt;
static struct fmt_tests tests_ripemd160[] = {
{"truecrypt_RIPEMD_160$b9f118f89d2699cbe42cad7bc2c61b0822b3d6e57e8d43e79f55666aa30572676c3aced5f0900af223e9fcdf43ac39637640977f546eb714475f8e2dbf5368bfb80a671d7796d4a88c36594acd07081b7ef0fbead3d3a0ff2b295e9488a5a2747ed97905436c28c636f408b36b0898aad3c4e9566182bd55f80e97a55ad9cf20899599fb775f314067c9f7e6153b9544bfbcffb53eef5a34b515e38f186a2ddcc7cd3aed635a1fb4aab98b82d57341ec6ae52ad72e43f41aa251717082d0858bf2ccc69a7ca00daceb5b325841d70bb2216e1f0d4dc936b9f50ebf92dbe2abec9bc3babea7a4357fa74a7b2bcce542044552bbc0135ae35568526e9bd2afde0fa4969d6dc680cf96f7d82ec0a75b6170c94e3f2b6fd98f2e6f01db08ce63f1b6bcf5ea380ed6f927a5a8ced7995d83ea8e9c49238e8523d63d6b669ae0d165b94f1e19b49922b4748798129eed9aa2dae0d2798adabf35dc4cc30b25851a3469a9ee0877775abca26374a4176f8d237f8191fcc870f413ffdbfa73ee22790a548025c4fcafd40f631508f1f6c8d4c847e409c839d21ff146f469feff87198bc184db4b5c5a77f3402f491538503f68e0116dac76344b762627ad678de76cb768779f8f1c35338dd9f72dcc1ac337319b0e21551b9feb85f8cac67a2f35f305a39037bf96cd61869bf1761abcce644598dad254990d17f0faa4965926acb75abf", "password" },
{"truecrypt_RIPEMD_160$6ab053e5ebee8c56bce5705fb1e03bf8cf99e2930232e525befe1e45063aa2e30981585020a967a1c45520543847cdb281557e16c81cea9d329b666e232eeb008dbe3e1f1a181f69f073f0f314bc17e255d42aaa1dbab92231a4fb62d100f6930bae4ccf6726680554dea3e2419fb67230c186f6af2c8b4525eb8ebb73d957b01b8a124b736e45f94160266bcfaeda16b351ec750d980250ebb76672578e9e3a104dde89611bce6ee32179f35073be9f1dee8da002559c6fab292ff3af657cf5a0d864a7844235aeac441afe55f69e51c7a7c06f7330a1c8babae2e6476e3a1d6fb3d4eb63694218e53e0483659aad21f20a70817b86ce56c2b27bae3017727ff26866a00e75f37e6c8091a28582bd202f30a5790f5a90792de010aebc0ed81e9743d00518419f32ce73a8d3f07e55830845fe21c64a8a748cbdca0c3bf512a4938e68a311004538619b65873880f13b2a9486f1292d5c77116509a64eb0a1bba7307f97d42e7cfa36d2b58b71393e04e7e3e328a7728197b8bcdef14cf3f7708cd233c58031c695da5f6b671cc5066323cc86bb3c6311535ad223a44abd4eec9077d70ab0f257de5706a3ff5c15e3bc2bde6496a8414bc6a5ed84fe9462b65efa866312e0699e47338e879ae512a66f3f36fc086d2595bbcff2e744dd1ec283ba8e91299e62e4b2392608dd950ede0c1f3d5b317b2870ead59efe096c054ea1", "123" },
{NULL}
};
static cl_int cl_error;
static pbkdf2_password *inbuffer;
static pbkdf2_hash *outbuffer;
static pbkdf2_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
static size_t insize, outsize, settingsize;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
static void create_clobj(size_t gws, struct fmt_main *self)
{
insize = sizeof(pbkdf2_password) * gws;
outsize = sizeof(pbkdf2_hash) * gws;
settingsize = sizeof(pbkdf2_salt);
inbuffer = mem_calloc(1, insize);
outbuffer = mem_alloc(outsize);
/// Allocate memory
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_setting =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem setting");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
&mem_setting), "Error while setting mem_salt kernel argument");
first_block_dec = mem_calloc(gws, sizeof(*first_block_dec));
keyfiles_data = mem_calloc(MAX_KEYFILES, sizeof(*keyfiles_data));
keyfiles_length = mem_calloc(MAX_KEYFILES, sizeof(int));
}
static void release_clobj(void)
{
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(inbuffer);
MEM_FREE(outbuffer);
MEM_FREE(first_block_dec);
MEM_FREE(keyfiles_data);
MEM_FREE(keyfiles_length);
}
static void done(void)
{
if (autotuned) {
release_clobj();
HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
autotuned--;
}
}
static void init(struct fmt_main *_self)
{
self = _self;
opencl_prepare_dev(gpu_id);
}
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
snprintf(build_opts, sizeof(build_opts),
"-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
(int)sizeof(inbuffer->v),
(int)sizeof(currentsalt.salt),
(int)sizeof(outbuffer->v));
opencl_init("$JOHN/kernels/pbkdf2_ripemd160_kernel.cl",
gpu_id, build_opts);
crypt_kernel = clCreateKernel(program[gpu_id], "pbkdf2_ripemd160",
&cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 0, NULL, warn, 1,
self, create_clobj, release_clobj,
sizeof(pbkdf2_password), 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, 1, 0, 1000);
}
}
static int valid(char* ciphertext, struct fmt_main *self)
{
unsigned int i;
char *p, *q;
int nkeyfiles = -1;
if (strncmp(ciphertext, TAG_RIPEMD160, TAG_RIPEMD160_LEN))
return 0;
ciphertext += TAG_RIPEMD160_LEN;
p = ciphertext;
q = strchr(p, '$');
if (!q) { /* no keyfiles */
if (strlen(ciphertext) != 512*2)
return 0;
} else {
if (q - p != 512 * 2)
return 0;
/* check keyfile(s) */
p = q + 1;
nkeyfiles = atoi(p);
if (nkeyfiles > MAX_KEYFILES || nkeyfiles < 1)
return 0;
}
for (i = 0; i < 512*2; i++) {
if (atoi16l[ARCH_INDEX(ciphertext[i])] == 0x7F)
return 0;
}
return 1;
}
static void set_salt(void *salt)
{
psalt = salt;
memcpy((char*)currentsalt.salt, psalt->salt, SALTLEN);
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
"Copy salt to gpu");
}
static void* get_salt(char *ciphertext)
{
static char buf[sizeof(struct cust_salt)+4];
struct cust_salt *s = (struct cust_salt*)mem_align(buf, 4);
unsigned int i;
char tpath[PATH_BUFFER_SIZE] = { 0 };
char *p, *q;
int idx;
FILE *fp;
size_t sz;
memset(s, 0, sizeof(struct cust_salt));
s->loop_inc = 1;
ciphertext += TAG_RIPEMD160_LEN;
s->hash_type = IS_RIPEMD160;
s->num_iterations = 2000;
// Convert the hexadecimal salt into binary
for (i = 0; i < 64; i++)
s->salt[i] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2*i+1])];
for (; i < 512; i++)
s->bin[i-64] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2*i+1])];
p = ciphertext;
q = strchr(p, '$');
if (!q) /* no keyfiles */
return s;
// process keyfile(s)
p = q + 1;
s->nkeyfiles = atoi(p);
for (idx = 0; idx < s->nkeyfiles; idx++) {
p = strchr(p, '$') + 1; // at first filename
q = strchr(p, '$');
if (!q) { // last file
memset(tpath, 0, sizeof(tpath));
strncpy(tpath, p, sizeof(tpath) - 1); /* bounded copy, keeps the final NUL */
} else {
size_t len = (size_t)(q - p);
if (len > sizeof(tpath) - 1)
len = sizeof(tpath) - 1;
memset(tpath, 0, sizeof(tpath));
strncpy(tpath, p, len); /* bounded copy of this filename */
}
/* read this into keyfiles_data[idx] */
fp = fopen(tpath, "rb");
if (!fp)
pexit("fopen %s", p);
if (fseek(fp, 0L, SEEK_END) == -1)
pexit("fseek");
sz = ftell(fp);
if (fseek(fp, 0L, SEEK_SET) == -1)
pexit("fseek");
if (fread(keyfiles_data[idx], 1, sz, fp) != sz)
pexit("fread");
keyfiles_length[idx] = sz;
fclose(fp);
}
return s;
}
static void AES_256_XTS_first_sector(const unsigned char *double_key,
unsigned char *out,
const unsigned char *data,
unsigned len) {
unsigned char tweak[16] = { 0 };
unsigned char buf[16];
int i, j, cnt;
AES_KEY key1, key2;
AES_set_decrypt_key(double_key, 256, &key1);
AES_set_encrypt_key(&double_key[32], 256, &key2);
// first AES tweak (we do it right over the tweak buffer)
AES_encrypt(tweak, tweak, &key2);
cnt = len/16;
for (j=0;;) {
for (i = 0; i < 16; ++i) buf[i] = data[i]^tweak[i];
AES_decrypt(buf, out, &key1);
for (i = 0; i < 16; ++i) out[i]^=tweak[i];
++j;
if (j == cnt)
break;
else {
unsigned char Cin, Cout;
unsigned x;
Cin = 0;
for (x = 0; x < 16; ++x) {
Cout = (tweak[x] >> 7) & 1;
tweak[x] = ((tweak[x] << 1) + Cin) & 0xFF;
Cin = Cout;
}
if (Cout)
tweak[0] ^= 135; //GF_128_FDBK;
}
data += 16;
out += 16;
}
}
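/*
 * The tweak update above is the little-endian "multiply by x" step in
 * GF(2^128) defined by IEEE P1619 (XTS mode), reducing by the polynomial
 * x^128 + x^7 + x^2 + x + 1 (feedback byte 0x87 = 135). A standalone sketch
 * of just that step, kept out of the build (illustrative only):
 */
#if 0
static void xts_mult_x(unsigned char tweak[16])
{
	unsigned char Cin = 0, Cout = 0;
	unsigned x;

	for (x = 0; x < 16; ++x) {
		Cout = (tweak[x] >> 7) & 1;         /* bit shifted out of this byte */
		tweak[x] = (unsigned char)((tweak[x] << 1) | Cin);
		Cin = Cout;
	}
	if (Cout)                                   /* carry out of bit 127 */
		tweak[0] ^= 0x87;                   /* GF_128_FDBK */
}
#endif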
static int apply_keyfiles(unsigned char *pass, size_t pass_memsz, int nkeyfiles)
{
int pl, k;
unsigned char *kpool;
unsigned char *kdata;
int kpool_idx;
size_t i, kdata_sz;
uint32_t crc;
if (pass_memsz < MAX_PASSSZ) {
error();
}
pl = strlen((char*)pass);
memset(pass+pl, 0, MAX_PASSSZ-pl);
if ((kpool = mem_calloc(1, KPOOL_SZ)) == NULL) {
error();
}
for (k = 0; k < nkeyfiles; k++) {
kpool_idx = 0;
kdata_sz = keyfiles_length[k];
kdata = keyfiles_data[k];
crc = ~0U;
for (i = 0; i < kdata_sz; i++) {
crc = jtr_crc32(crc, kdata[i]);
kpool[kpool_idx++] += (unsigned char)(crc >> 24);
kpool[kpool_idx++] += (unsigned char)(crc >> 16);
kpool[kpool_idx++] += (unsigned char)(crc >> 8);
kpool[kpool_idx++] += (unsigned char)(crc);
/* Wrap around */
if (kpool_idx == KPOOL_SZ)
kpool_idx = 0;
}
}
/* Apply keyfile pool to passphrase */
for (i = 0; i < KPOOL_SZ; i++)
pass[i] += kpool[i];
MEM_FREE(kpool);
return 0;
}
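/*
 * Worked example (illustrative): for a single keyfile whose content is the
 * byte 'A', the loop above feeds 'A' through CRC-32 once and adds the four
 * bytes of the running CRC (most significant first) into kpool[0..3]; longer
 * keyfiles keep updating the CRC and accumulating into the 64-byte pool,
 * wrapping at KPOOL_SZ. The final loop then adds the pool byte-wise onto the
 * zero-padded passphrase, which is what gets hashed instead of the raw
 * password.
 */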
static int crypt_all(int *pcount, struct db_salt *salt)
{
int i;
const int count = *pcount;
size_t *lws = local_work_size ? &local_work_size : NULL;
global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);
if (psalt->nkeyfiles) {
#if _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < count; i++) {
apply_keyfiles(inbuffer[i].v, 64, psalt->nkeyfiles);
inbuffer[i].length = 64;
}
}
/// Copy data to gpu
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
"Copy data to gpu");
/// Run kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
NULL, &global_work_size, lws, 0, NULL,
multi_profilingEvent[1]), "Run kernel");
/// Read the result back
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");
if (ocl_autotune_running)
return count;
#if _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < count; i++) {
AES_256_XTS_first_sector((unsigned char*)outbuffer[i].v, first_block_dec[i], psalt->bin, 16);
}
return count;
}
static int cmp_all(void* binary, int count)
{
int i;
for (i = 0; i < count; ++i) {
if (!memcmp(first_block_dec[i], "TRUE", 4))
return 1;
}
return 0;
}
static int cmp_one(void* binary, int index)
{
if (!memcmp(first_block_dec[index], "TRUE", 4))
return 1;
return 0;
}
static int cmp_crc32s(unsigned char *given_crc32, CRC32_t comp_crc32) {
return given_crc32[0] == ((comp_crc32>>24)&0xFF) &&
given_crc32[1] == ((comp_crc32>>16)&0xFF) &&
given_crc32[2] == ((comp_crc32>> 8)&0xFF) &&
given_crc32[3] == ((comp_crc32>> 0)&0xFF);
}
static int cmp_exact(char *source, int idx)
{
unsigned char key[64];
unsigned char decr_header[512-64];
CRC32_t check_sum;
int ksz = inbuffer[idx].length;
memcpy(key, inbuffer[idx].v, inbuffer[idx].length);
/* process keyfile(s) */
if (psalt->nkeyfiles) {
apply_keyfiles(key, 64, psalt->nkeyfiles);
ksz = 64;
}
pbkdf2_ripemd160(key, ksz, psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
AES_256_XTS_first_sector(key, decr_header, psalt->bin, 512-64);
if (memcmp(decr_header, "TRUE", 4))
return 0;
CRC32_Init(&check_sum);
CRC32_Update(&check_sum, &decr_header[256-64], 256);
if (!cmp_crc32s(&decr_header[8], ~check_sum))
return 0;
CRC32_Init(&check_sum);
CRC32_Update(&check_sum, decr_header, 256-64-4);
if (!cmp_crc32s(&decr_header[256-64-4], ~check_sum))
return 0;
return 1;
}
#undef set_key
static void set_key(char *key, int index)
{
uint8_t length = strlen(key);
if (length > PLAINTEXT_LENGTH)
length = PLAINTEXT_LENGTH;
inbuffer[index].length = length;
memcpy(inbuffer[index].v, key, length);
}
static char *get_key(int index)
{
static char ret[PLAINTEXT_LENGTH + 1];
uint8_t length = inbuffer[index].length;
memcpy(ret, inbuffer[index].v, length);
ret[length] = '\0';
return ret;
}
static int salt_hash(void *salt)
{
unsigned v=0, i;
struct cust_salt *psalt = (struct cust_salt*)salt;
for (i = 0; i < 64; ++i) {
v *= 11;
v += psalt->salt[i];
}
return v & (SALT_HASH_SIZE - 1);
}
struct fmt_main FMT_STRUCT = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ TAG_RIPEMD160 },
tests_ripemd160
}, {
init,
done,
reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
lcs.c | /* Dynamic Programming solution to find length of the
longest common substring
Adapted from http://www.geeksforgeeks.org/longest-common-substring/
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
/*
SPEED-UP: 1.84
SEQUENTIAL TIME (omp_get_wtime) LCSubStr: 3.33 sec
PARALLEL TIME (omp_get_wtime) schedule(dynamic, 1000) LCSubStr: 1.81 sec
PARALLEL TIME (omp_get_wtime) schedule(static, 100) LCSubStr: 1.90 sec
PARALLEL TIME (omp_get_wtime) schedule(guided, 100) LCSubStr: 1.82 sec
*/
// Read input files
char* readFile(char* filename, int* size)
{
char* buffer = NULL;
*size = 0;
/* Open the file in read-only mode */
FILE *fp = fopen(filename, "r");
if (fp == NULL) { /* fail fast if the file cannot be opened */
perror(filename);
exit(EXIT_FAILURE);
}
/* Get the buffer size */
fseek(fp, 0, SEEK_END); /* Go to end of file */
*size = ftell(fp); /* How many bytes did we pass ? */
/* Set position of stream to the beginning */
rewind(fp);
/* Allocate the buffer (no need to initialize it with calloc) */
buffer = malloc((*size + 1) * sizeof(*buffer)); /* size + 1 byte for the \0 */
/* Read the file into the buffer */
/* Read 1 chunk of *size bytes from fp into buffer */
if (fread(buffer, *size, 1, fp) != 1) {
fprintf(stderr, "fread failed for %s\n", filename);
exit(EXIT_FAILURE);
}
fclose(fp);
/* NULL-terminate the buffer */
buffer[*size] = '\0';
/* Print it ! */
// printf("%s\n", buffer);
return(buffer);
}
// A utility function to find maximum of two integers
int max(int a, int b)
{ return (a > b)? a : b; }
/* Returns length of longest common substring of X[0..m-1]
and Y[0..n-1] */
int LCSubStr(char *x, char *y, int m, int n)
{
// Create a table to store lengths of longest common suffixes of
// substrings. Note that LCSuff[i][j] contains the length of the longest
// common suffix of X[0..i-1] and Y[0..j-1]. The first row and
// first column entries have no logical meaning; they are used only
// for simplicity of the program
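// Worked example (illustrative): for x = "GeeksforGeeks" and y = "GeeksQuiz"
// the table peaks at LCSuff[5][5] = 5, matching the longest common
// substring "Geeks".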
int **LCSuff = (int**) malloc((m+1) * sizeof(int*));
for(int i =0; i < m+1; i++)
LCSuff[i] = (int*) malloc((n+1) * sizeof(int));
int result = 0; // To store length of the longest common substring
/* Following steps build LCSuff[m+1][n+1] in bottom up fashion. */
for (int i=0; i<=m; i++) {
#pragma omp parallel for num_threads(2) reduction(max:result) schedule(dynamic, 1000)
for (int j=0; j<=n; j++) {
if (i == 0 || j == 0)
LCSuff[i][j] = 0;
else if (x[i-1] == y[j-1]) {
LCSuff[i][j] = LCSuff[i-1][j-1] + 1;
result = max(result, LCSuff[i][j]);
}
else LCSuff[i][j] = 0;
}
}
/* Release the DP table before returning */
for (int i = 0; i < m+1; i++)
free(LCSuff[i]);
free(LCSuff);
return result;
}
/* Driver program to test above function */
int main()
{
int m, n;
char* x = readFile("seqA.txt",&m);
char* y = readFile("seqB.txt",&n);
double start, end;
start = omp_get_wtime();
int length = LCSubStr(x, y, m, n);
end = omp_get_wtime();
printf("Tempo: %f sec\n", end - start);
printf("\nLength of Longest Common Substring is %d\n",length);
return 0;
}
|
bml_copy_ellsort_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_copy.h"
#include "../bml_types.h"
#include "bml_allocate_ellsort.h"
#include "bml_copy_ellsort.h"
#include "bml_types_ellsort.h"
#include <complex.h>
#include <stdlib.h>
#include <string.h>
/** Copy an ellsort matrix - result is a new matrix.
*
* \ingroup copy_group
*
* \param A The matrix to be copied
* \return A copy of matrix A.
*/
bml_matrix_ellsort_t *TYPED_FUNC(
bml_copy_ellsort_new) (
bml_matrix_ellsort_t * A)
{
bml_matrix_dimension_t matrix_dimension = { A->N, A->N, A->M };
bml_matrix_ellsort_t *B =
TYPED_FUNC(bml_noinit_matrix_ellsort) (matrix_dimension,
A->distribution_mode);
int N = A->N;
int M = A->M;
int *A_index = A->index;
REAL_T *A_value = A->value;
int *B_index = B->index;
REAL_T *B_value = B->value;
// memcpy(B->index, A->index, sizeof(int) * A->N * A->M);
memcpy(B->nnz, A->nnz, sizeof(int) * A->N);
// memcpy(B->value, A->value, sizeof(REAL_T) * A->N * A->M);
#pragma omp parallel for
for (int i = 0; i < N; i++)
{
memcpy(&B_index[ROWMAJOR(i, 0, N, M)], &A_index[ROWMAJOR(i, 0, N, M)],
M * sizeof(int));
memcpy(&B_value[ROWMAJOR(i, 0, N, M)], &A_value[ROWMAJOR(i, 0, N, M)],
M * sizeof(REAL_T));
}
bml_copy_domain(A->domain, B->domain);
bml_copy_domain(A->domain2, B->domain2);
return B;
}
/** Copy an ellsort matrix.
*
* \ingroup copy_group
*
* \param A The matrix to be copied
* \param B Copy of matrix A
*/
void TYPED_FUNC(
bml_copy_ellsort) (
bml_matrix_ellsort_t * A,
bml_matrix_ellsort_t * B)
{
int N = A->N;
int M = A->M;
int *A_index = A->index;
REAL_T *A_value = A->value;
int *B_index = B->index;
REAL_T *B_value = B->value;
// memcpy(B->index, A->index, sizeof(int) * A->N * A->M);
memcpy(B->nnz, A->nnz, sizeof(int) * A->N);
// memcpy(B->value, A->value, sizeof(REAL_T) * A->N * A->M);
#pragma omp parallel for
for (int i = 0; i < N; i++)
{
memcpy(&B_index[ROWMAJOR(i, 0, N, M)], &A_index[ROWMAJOR(i, 0, N, M)],
M * sizeof(int));
memcpy(&B_value[ROWMAJOR(i, 0, N, M)], &A_value[ROWMAJOR(i, 0, N, M)],
M * sizeof(REAL_T));
}
if (A->distribution_mode == B->distribution_mode)
{
bml_copy_domain(A->domain, B->domain);
bml_copy_domain(A->domain2, B->domain2);
}
}
/** Reorder an ellsort matrix.
*
* \ingroup copy_group
*
* \param A The matrix to be reordered
* \param perm The permutation vector
*/
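/* Example (illustrative): with N = 3 and perm = {2, 0, 1}, old row 0 is
 * copied into new row 2, old row 1 into new row 0 and old row 2 into new
 * row 1; the second loop then renames every stored column index j to
 * perm[j], so the pattern is permuted consistently on both axes. */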
void TYPED_FUNC(
bml_reorder_ellsort) (
bml_matrix_ellsort_t * A,
int *perm)
{
int N = A->N;
int M = A->M;
int *A_index = A->index;
int *A_nnz = A->nnz;
REAL_T *A_value = A->value;
bml_matrix_ellsort_t *B = bml_copy_new(A);
int *B_index = B->index;
int *B_nnz = B->nnz;
REAL_T *B_value = B->value;
// Reorder rows - need to copy
#pragma omp parallel for
for (int i = 0; i < N; i++)
{
memcpy(&A_index[ROWMAJOR(perm[i], 0, N, M)],
&B_index[ROWMAJOR(i, 0, N, M)], M * sizeof(int));
memcpy(&A_value[ROWMAJOR(perm[i], 0, N, M)],
&B_value[ROWMAJOR(i, 0, N, M)], M * sizeof(REAL_T));
A_nnz[perm[i]] = B_nnz[i];
}
bml_deallocate_ellsort(B);
// Reorder elements in each row - just change index
#pragma omp parallel for
for (int i = 0; i < N; i++)
{
for (int j = 0; j < A_nnz[i]; j++)
{
A_index[ROWMAJOR(i, j, N, M)] =
perm[A_index[ROWMAJOR(i, j, N, M)]];
}
}
}
|
atlasmm.c | /* Hi, everybody!
* =====================================================================================
*
* Filename: MMmultiple.c
*
* Description: Do Matrix Multiplication C = A x B with A and B blocks generated by
* mkmatrices.c.
*
* Version: 1.0
* Created: 09/21/2016 22:53:31
* Revision: none
* Compiler: gcc
*
* Author: Xiukun Hu
* Organization: University of Wyoming, Department of Mathematics
*
* =====================================================================================
*/
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_num_threads() 1
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1
#endif
#include <stdio.h>
#include <stdlib.h>
#include "cblas.h"
#include "matrices.h"
#ifndef WIDTH
#define WIDTH 30
#endif
void ClearMatrix( double** matrix, int nrows, int ncols ) {
int i, j;
for ( i = 0 ; i < nrows ; i++ )
for ( j = 0 ; j < ncols ; j++ )
matrix[i][j] = 0;
}
int main(){
/* Local declarations */
enum CBLAS_ORDER order = CblasColMajor;
enum CBLAS_TRANSPOSE transA = CblasNoTrans;
enum CBLAS_TRANSPOSE transB = CblasNoTrans;
const int NTH = omp_get_max_threads();
double tsc[NTH];
double tsc1;
double t1; /* Time keeper */
double t2; /* Time keeper */
double tt1;
double tt;
double tio1; /* Private I/O time keeper */
double tio = 0; /* Private I/O time keeper */
double tc1; /* Compute time */
double tc = 0; /* Compute time */
double tw1; /* Wait time */
double tw = 0; /* Wait time */
double temp; /* Private pointer for saving results */
double mrun(); /* Get timing information */
double **ablock[2]; /* Pointer to one block of A */
double **bblock[2]; /* Pointer to one block of B */
double **cblock[2]; /* Pointer to one block of C */
int acols = 0; /* Block columns in A */
int arows = 0; /* Block rows in A */
int bcols = 0; /* Block columns in B */
int brows = 0; /* Block rows in B */
int ccols = 0; /* Block columns in C */
int crows = 0; /* Block rows in C */
int blk_cols = 0; /* Columns in a block */
int blk_rows = 0; /* Rows in a block */
int mopt_a = 1; /* How to allocate space in A blocks */
int mopt_b = 1; /* How to allocate space in B blocks */
int mopt_c = 1; /* How to allocate space in C blocks */
int colleft; /* Block columns residue by WIDTH */
int i = 0; /* Loop index */
int j = 0; /* Loop index */
int k = 0; /* Loop index */
int I,J,K; /* Loop index */
int iplus; /* Loop index */
int jplus; /* Loop index */
int kplus; /* Loop index */
int tog = 0; /* Toggle for a&bblock */
int ctog = 0; /* Toggle for cblock */
int TID; /* Thread ID */
int ar; /* ablock row index */
int ac; /* ablock col index */
int rc;
int nI;
int nThreads;
char c = ' '; /* Input character */
tt1 = mrun();
/* Get matrix information from disk */
matrix_info_read( &blk_rows, &blk_cols,
&arows, &acols,
&brows, &bcols,
&crows, &ccols );
/* Preprocess message */
colleft = blk_cols % WIDTH; /* Columns left for each block over WIDTH */
nI = blk_rows * (blk_cols / WIDTH); /* Number of iterations for each block */
rc = blk_cols - colleft; /* The starting index of the residue column */
/* Allocate 6 block matrices (two each for A, B and C) */
ablock[0] = block_allocate( blk_rows, blk_cols, mopt_a );
bblock[0] = block_allocate( blk_rows, blk_cols, mopt_b );
cblock[0] = block_allocate( blk_rows, blk_cols, mopt_c );
ablock[1] = block_allocate( blk_rows, blk_cols, mopt_a );
bblock[1] = block_allocate( blk_rows, blk_cols, mopt_b );
cblock[1] = block_allocate( blk_rows, blk_cols, mopt_c );
ClearMatrix( cblock[0], blk_rows, blk_cols );
ClearMatrix( cblock[1], blk_rows, blk_cols );
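/* Pipeline summary: each of A, B and C has two buffers. While the team
 * multiplies the current pair (selected by `tog`), one thread prefetches the
 * next A and B blocks from disk into the other pair; `ctog` flips whenever a
 * C block is finished (kplus == 0), so a completed block can be written to
 * disk while the next one accumulates. */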
/* Enter parallel region */
#pragma omp parallel default(none) \
shared(blk_cols, blk_rows, \
ablock, bblock, cblock, \
mopt_a, mopt_b, mopt_c, \
acols, crows, ccols, \
colleft, nI, nThreads, \
rc, t1, t2, tsc, tsc1) \
firstprivate( tog, ctog, i, j, k, tio, tc, tw ) \
private( TID, I, J, K, iplus, jplus, kplus, temp, ar, ac, tio1, tc1, tw1 )
{
#pragma omp single
{
nThreads = omp_get_num_threads();
t1 = mrun();
}
tc1 = t1;
TID = omp_get_thread_num();
/* Single thread reading the A00 B00 for calculating */
#pragma omp single
{
tio1 = mrun();
tc += tio1 - tc1;
block_readdisk( blk_rows, blk_cols, "A", 0, 0, ablock[0], mopt_a, 0 );
block_readdisk( blk_rows, blk_cols, "B", 0, 0, bblock[0], mopt_a, 0 );
tc1 = mrun();
tio += tc1 - tio1;
printf("Thread %d reading A00 and B00 in %les\n", TID, tio);
} // single thread reading A00 B00
/* Reading and calculating at the same time */
while ( i < crows ){
/* Get next loop's index i+, j+ and k+ */
kplus = (k+1) % acols;
jplus = (kplus==0)? ((j+1)%ccols) : j;
iplus = (jplus==0 && kplus==0)? i+1 : i;
/* Single thread reading A_i+k+ & B_k+j+ */
#pragma omp single nowait
{
if ( iplus < crows ) {
tio1 = mrun();
tc += tio1 - tc1;
block_readdisk( blk_rows, blk_cols, "A", iplus, kplus, ablock[1-tog], mopt_a, 0 );
block_readdisk( blk_rows, blk_cols, "B", kplus, jplus, bblock[1-tog], mopt_b, 0 );
tc1 = mrun();
tio += tc1 - tio1;
}
}
#pragma omp single nowait
if ( i == 0 && j == 0 && k == 0 )
tsc1 = mrun();
/* Multithreads calculating A_ik x B_kj */
cblas_dgemm(order,transA,transB, blk_rows, blk_cols, blk_cols ,1.0,
ablock[tog][0], blk_rows , bblock[tog][0], blk_cols ,1.0,cblock[ctog][0], blk_rows);
tw1 = mrun();
tc += tw1 - tc1;
if ( i == 0 && j == 0 && k == 0 )
tsc[TID] = mrun();
/* Barrier for reading A_i+k+ B_k+j+ and calculating A_ik x B_kj */
#pragma omp barrier
tc1 = mrun();
tw += tc1 - tw1;
/* Every thread check but single thread write to disk */
if ( kplus==0 ) {
#pragma omp single nowait
{
tio1 = mrun();
tc += tio1 - tc1;
block_write2disk( blk_rows, blk_cols, "D", i, j, cblock[ctog][0] );
ClearMatrix( cblock[ctog], blk_rows, blk_cols );
tc1 = mrun();
tio += tc1 - tio1;
} // Write cblock: OMP single nowait
ctog = 1-ctog; // Every thread change ctog if k+ = 0.
}
/* Every thread change to another ablock and bblock and update index */
tog = 1 - tog;
i = iplus;
j = jplus;
k = kplus;
} /* While loop for blocks */
printf("Thread %d, compute for %les, io for %les, wait for %le\n", TID, tc, tio, tw);
#pragma omp master
{
t2 = mrun() - t1;
}
}// End of parallel region
printf("Time in parallel region: %les\n", t2);
for ( i = 1 ; i < nThreads ; i++ )
tsc[0] = (tsc[0] < tsc[i])? tsc[i] : tsc[0];
tt = mrun() - tt1;
/* Print time */
printf("Total time: %les\n", tt);
printf("Time for multiplying A00 x B00 in parallel: %le\n", tsc[0]-tsc1);
/* End */
return 0;
}
|
GB_unop__identity_fc64_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_uint32)
// op(A') function: GB (_unop_tran__identity_fc64_uint32)
// C type: GxB_FC64_t
// A type: uint32_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
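// For instance (illustrative): an input entry aij = 7 (uint32_t) is cast by
// the macro above to the complex value GxB_CMPLX ((double) 7, 0) before
// being stored into Cx.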
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fc64_uint32)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fc64_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
loops_course_test.c | #include <stdio.h>
#include <math.h>
#include <malloc.h>
#include <string.h>
#define N 729
#define reps 1000
#include <omp.h>
double a[N][N], b[N][N], c[N];
int jmax[N];
void init1(void);
void init2(void);
void runloop(int);
void loop1chunk(int, int);
void loop2chunk(int, int);
void valid1(void);
void valid2(void);
int main(int argc, char *argv[]) {
double start1,start2,end1,end2;
int r;
init1();
start1 = omp_get_wtime();
for (r=0; r<reps; r++){
runloop(1);
}
end1 = omp_get_wtime();
valid1();
printf("Total time for %d reps of loop 1 = %f\n",reps, (float)(end1-start1));
init2();
start2 = omp_get_wtime();
for (r=0; r<reps; r++){
runloop(2);
}
end2 = omp_get_wtime();
valid2();
printf("Total time for %d reps of loop 2 = %f\n",reps, (float)(end2-start2));
}
void init1(void){
int i,j;
for (i=0; i<N; i++){
for (j=0; j<N; j++){
a[i][j] = 0.0;
b[i][j] = 3.142*(i+j);
}
}
}
void init2(void){
int i,j, expr;
for (i=0; i<N; i++){
expr = i%( 3*(i/30) + 1);
if ( expr == 0) {
jmax[i] = N;
}
else {
jmax[i] = 1;
}
c[i] = 0.0;
}
for (i=0; i<N; i++){
for (j=0; j<N; j++){
b[i][j] = (double) (i*j+1) / (double) (N*N);
}
}
}
//////////////////////////////////////////////////////////////////////
///////////// New structure definition ///////////////////////////////
typedef struct _TopList {
int thread_num;
int avail_size;
} TopList ;
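//Example (illustrative): with 4 threads the list might read
//  { {2,120}, {0,96}, {3,40}, {1,0} }
//i.e. ordered by descending avail_size, so plist[0] always names the best
//thread to steal work from; thread_num == -1 marks an unused slot.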
//////////////////////////////////////////////////////////////////////
///////////// New function definitions ///////////////////////////////
//Update the available size of a thread in the top list, reordering the list to keep it sorted
void updatetoplist(TopList *plist , int list_len , int t_num , int avail_size);
//Get a piece of work from a specific thread's local chunk
int getchunk(int (*pavail)[2] , TopList *plist , int t_num , int t_size , int *plow , int *phigh);
//Assign a new chunk to a specific thread
int dispatchwork(int (*pavail)[2] , omp_lock_t *plock , TopList *plist , \
int t_num , int t_size , int *plow , int *phigh);
//////////////////////////////////////////////////////////////////////
///////////// New version of runloop function ////////////////////////
void runloop(int loopid) {
//Shared values among threads
int (*pavail)[2]; //The boundary value of each thread's local trunk
TopList *plist; //The ordered list of threads number by their available trunk size
omp_lock_t top_lock; //Lock for serialize the work assignment actions
// printf("***********LOOP BEGIN*********************\n");
#pragma omp parallel default(none) shared(loopid,pavail,plist,top_lock)
{
int myid = omp_get_thread_num();
int nthreads = omp_get_num_threads();
// printf("INIT:T_ID-%d TOTAL-%d\n",myid,nthreads);
// int ipt = (int) ceil((double)N/(double)nthreads);
// int lo = myid*ipt;
// int hi = (myid+1)*ipt;
// if (hi > N) hi = N;
#pragma omp single
{
//Allocate the space to store each thread's chunk boundaries for the remaining loops
pavail = (int(*)[2])malloc( sizeof(int)*2*nthreads );
int block_size = N /nthreads ;
//Set the initial chunk boundary for each thread and init the lock.
for (int i = 0; i < nthreads; i++)
{
pavail[i][0] = i * block_size;
pavail[i][1] = (i+1) * block_size;
if ( N - (i+1) * block_size < block_size )
pavail[i][1] = N;
}
//Initialize the top list and lock
omp_init_lock(&top_lock) ;
plist = (TopList *)malloc(sizeof(TopList)*nthreads);
memset(plist,-1,sizeof(TopList)*nthreads);
}
int lo, hi;
while ( dispatchwork(pavail,&top_lock,plist,myid,nthreads,&lo,&hi) ) {
// printf("CALC:T_ID-%d LOW:%d HIGH:%d \n",myid,lo,hi);
switch (loopid) {
case 1: loop1chunk(lo,hi); break;
case 2: loop2chunk(lo,hi); break;
}
}
}
}
void loop1chunk(int lo, int hi) {
int i,j;
for (i=lo; i<hi; i++){
for (j=N-1; j>i; j--){
a[i][j] += cos(b[i][j]);
}
}
}
void loop2chunk(int lo, int hi) {
int i,j,k;
double rN2;
rN2 = 1.0 / (double) (N*N);
for (i=lo; i<hi; i++){
for (j=0; j < jmax[i]; j++){
for (k=0; k<j; k++){
c[i] += (k+1) * log (b[i][j]) * rN2;
}
}
}
}
void valid1(void) {
int i,j;
double suma;
suma= 0.0;
for (i=0; i<N; i++){
for (j=0; j<N; j++){
suma += a[i][j];
}
}
printf("Loop 1 check: Sum of a is %lf\n", suma);
}
void valid2(void) {
int i;
double sumc;
sumc= 0.0;
for (i=0; i<N; i++){
sumc += c[i];
}
printf("Loop 2 check: Sum of c is %f\n", sumc);
}
/////////////////////////////////////////////////////////////////////
/////////////New functions developed in this coursework//////////////
//Update the available size of a thread in the top list, reordering the list to keep it sorted
void updatetoplist(TopList *plist , int list_len , int t_num , int avail_size) {
int origin_num = t_num;
TopList tp_swap,tp_cur;
tp_cur.thread_num = t_num;
tp_cur.avail_size = avail_size;
int iter_mode = 0;
for (int i=0; i<list_len; i++) {
if (iter_mode == 0) {
if (plist[i].thread_num == -1) {
plist[i] = tp_cur;
break;
}
else if (plist[i].thread_num == origin_num) {
plist[i] = tp_cur;
iter_mode = 1;
}
else if (plist[i].avail_size < tp_cur.avail_size ) {
tp_swap = plist[i] ;
plist[i] = tp_cur ;
tp_cur = tp_swap ;
}
}
else if (iter_mode == 1) {
if (plist[i].thread_num == -1) break;
else if (plist[i].avail_size > plist[i-1].avail_size) {
tp_swap = plist[i] ;
plist[i] = plist[i-1];
plist[i-1] = tp_swap;
}
else break;
}
}
}
//Get a piece of work from a specific thread's local chunk
int getchunk(int (*pavail)[2] , TopList *plist , int t_num , int t_size , int *plow , int *phigh) {
int get_size = 0;
int avail_size = pavail[t_num][1] - pavail[t_num][0];
if ( avail_size > 0 ) {
*plow = pavail[t_num][0];
get_size = avail_size / t_size;
if (get_size == 0 ) get_size = avail_size % t_size;
else if (get_size < t_size) get_size = t_size;
*phigh = *plow + get_size;
pavail[t_num][0] += get_size;
avail_size = pavail[t_num][1] - pavail[t_num][0];
updatetoplist(plist,t_size,t_num,avail_size);
return 1;
}
else return 0;
}
//Assign a new chunk to a specific thread
int dispatchwork(int (*pavail)[2] , omp_lock_t *plock , TopList *plist , \
int t_num , int t_size , int *plow , int *phigh) {
// printf("DISPA:T_ID-%d LOW-%d HIGH-%d LOW-%d HIGH-%d LIST-%d AVAIL-%d\n",t_num,pavail[t_num][0],pavail[t_num][1],pavail[t_size-1][0],pavail[t_size-1][1],plist[0].thread_num,plist[0].avail_size);
int result = 1;
omp_set_lock(plock);
if (getchunk(pavail,plist,t_num,t_size,plow,phigh) == 0) {
if (plist[0].avail_size > 0) {
if (getchunk(pavail,plist,plist[0].thread_num,t_size,plow,phigh) == 0)
result = 0;
}
else result = 0;
}
omp_unset_lock(plock);
return result ;
}
|
omp_task_if.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
int test_omp_task_if()
{
int condition_false;
int count;
int result;
count=0;
condition_false = (count == 1);
#pragma omp parallel
{
#pragma omp single
{
#pragma omp task if (condition_false) shared(count, result)
{
my_sleep (SLEEPTIME);
#pragma omp critical
result = (0 == count);
} /* end of omp task */
#pragma omp critical
count = 1;
} /* end of single */
} /*end of parallel */
return result;
}
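/* Note: condition_false evaluates to 0, so the if clause makes the task
 * undeferred -- the encountering thread executes the task body immediately,
 * before "count = 1" later in the single region. The task therefore sees
 * count == 0 and sets result to 1; a deferred task could run after count is
 * set and the test would fail. */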
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_task_if()) {
num_failed++;
}
}
return num_failed;
}
|
interpolation_v4.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
static inline void interpolation_v4_block(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
// interpolate 3D array from read_i,j,k of read[] to write_i,j,k in write[] using volume averaged quartic prolongation
int write_dim_i = block->dim.i<<1; // calculate the dimensions of the resultant fine block
int write_dim_j = block->dim.j<<1;
int write_dim_k = block->dim.k<<1;
int read_i = block->read.i;
int read_j = block->read.j;
int read_k = block->read.k;
int read_jStride = block->read.jStride;
int read_kStride = block->read.kStride;
int write_i = block->write.i;
int write_j = block->write.j;
int write_k = block->write.k;
int write_jStride = block->write.jStride;
int write_kStride = block->write.kStride;
const double * __restrict__ read = block->read.ptr;
double * __restrict__ write = block->write.ptr;
if(block->read.box >=0){
read_jStride = level_c->my_boxes[block->read.box ].jStride;
read_kStride = level_c->my_boxes[block->read.box ].kStride;
read = level_c->my_boxes[ block->read.box].vectors[id_c] + level_c->box_ghosts*(1+ read_jStride+ read_kStride);
}
if(block->write.box>=0){
write_jStride = level_f->my_boxes[block->write.box].jStride;
write_kStride = level_f->my_boxes[block->write.box].kStride;
write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->box_ghosts*(1+write_jStride+write_kStride);
}
#ifdef USE_NAIVE_INTERP
// naive 125pt per fine grid cell
int i,j,k;
double c2 = -3.0/128.0;
double c1 = 22.0/128.0;
int dj = read_jStride;
int dk = read_kStride;
int dj2 = 2*read_jStride;
int dk2 = 2*read_kStride;
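  // Consistency check (illustrative): the 1-D weights used below,
  // (-3/128, +22/128, 1, -22/128, +3/128), sum to exactly 1, so the
  // tensor-product stencil reproduces constants; the sign flips on
  // si*/sj*/sk* select the mirrored stencil for odd fine-grid indices.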
for(k=0;k<write_dim_k;k++){double sk1=c1,sk2=c2;if(k&0x1){sk1=-c1;sk2=-c2;}
for(j=0;j<write_dim_j;j++){double sj1=c1,sj2=c2;if(j&0x1){sj1=-c1;sj2=-c2;}
for(i=0;i<write_dim_i;i++){double si1=c1,si2=c2;if(i&0x1){si1=-c1;si2=-c2;}
int write_ijk = ((i )+write_i) + (((j )+write_j)*write_jStride) + (((k )+write_k)*write_kStride);
int read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
//
// | -3/128 | +22/128 | 1.0 | -22/128 | +3/128 | coarse grid
// |-----+-----|-----+-----|-----+-----|-----+-----|-----+-----|
// | | | | |?????| | | | | | fine grid
//
write[write_ijk] = prescale_f*write[write_ijk] +
+ sk2*( + sj2*( si2*read[read_ijk-2-dj2-dk2] + si1*read[read_ijk-1-dj2-dk2] + read[read_ijk-dj2-dk2] - si1*read[read_ijk+1-dj2-dk2] - si2*read[read_ijk+2-dj2-dk2] )
+ sj1*( si2*read[read_ijk-2-dj -dk2] + si1*read[read_ijk-1-dj -dk2] + read[read_ijk-dj -dk2] - si1*read[read_ijk+1-dj -dk2] - si2*read[read_ijk+2-dj -dk2] )
+ ( si2*read[read_ijk-2 -dk2] + si1*read[read_ijk-1 -dk2] + read[read_ijk -dk2] - si1*read[read_ijk+1 -dk2] - si2*read[read_ijk+2 -dk2] )
- sj1*( si2*read[read_ijk-2+dj -dk2] + si1*read[read_ijk-1+dj -dk2] + read[read_ijk+dj -dk2] - si1*read[read_ijk+1+dj -dk2] - si2*read[read_ijk+2+dj -dk2] )
- sj2*( si2*read[read_ijk-2+dj2-dk2] + si1*read[read_ijk-1+dj2-dk2] + read[read_ijk+dj2-dk2] - si1*read[read_ijk+1+dj2-dk2] - si2*read[read_ijk+2+dj2-dk2] ) )
+ sk1*( + sj2*( si2*read[read_ijk-2-dj2-dk ] + si1*read[read_ijk-1-dj2-dk ] + read[read_ijk-dj2-dk ] - si1*read[read_ijk+1-dj2-dk ] - si2*read[read_ijk+2-dj2-dk ] )
+ sj1*( si2*read[read_ijk-2-dj -dk ] + si1*read[read_ijk-1-dj -dk ] + read[read_ijk-dj -dk ] - si1*read[read_ijk+1-dj -dk ] - si2*read[read_ijk+2-dj -dk ] )
+ ( si2*read[read_ijk-2 -dk ] + si1*read[read_ijk-1 -dk ] + read[read_ijk -dk ] - si1*read[read_ijk+1 -dk ] - si2*read[read_ijk+2 -dk ] )
- sj1*( si2*read[read_ijk-2+dj -dk ] + si1*read[read_ijk-1+dj -dk ] + read[read_ijk+dj -dk ] - si1*read[read_ijk+1+dj -dk ] - si2*read[read_ijk+2+dj -dk ] )
- sj2*( si2*read[read_ijk-2+dj2-dk ] + si1*read[read_ijk-1+dj2-dk ] + read[read_ijk+dj2-dk ] - si1*read[read_ijk+1+dj2-dk ] - si2*read[read_ijk+2+dj2-dk ] ) )
+ ( + sj2*( si2*read[read_ijk-2-dj2 ] + si1*read[read_ijk-1-dj2 ] + read[read_ijk-dj2 ] - si1*read[read_ijk+1-dj2 ] - si2*read[read_ijk+2-dj2 ] )
+ sj1*( si2*read[read_ijk-2-dj ] + si1*read[read_ijk-1-dj ] + read[read_ijk-dj ] - si1*read[read_ijk+1-dj ] - si2*read[read_ijk+2-dj ] )
+ ( si2*read[read_ijk-2 ] + si1*read[read_ijk-1 ] + read[read_ijk ] - si1*read[read_ijk+1 ] - si2*read[read_ijk+2 ] )
- sj1*( si2*read[read_ijk-2+dj ] + si1*read[read_ijk-1+dj ] + read[read_ijk+dj ] - si1*read[read_ijk+1+dj ] - si2*read[read_ijk+2+dj ] )
- sj2*( si2*read[read_ijk-2+dj2 ] + si1*read[read_ijk-1+dj2 ] + read[read_ijk+dj2 ] - si1*read[read_ijk+1+dj2 ] - si2*read[read_ijk+2+dj2 ] ) )
- sk1*( + sj2*( si2*read[read_ijk-2-dj2+dk ] + si1*read[read_ijk-1-dj2+dk ] + read[read_ijk-dj2+dk ] - si1*read[read_ijk+1-dj2+dk ] - si2*read[read_ijk+2-dj2+dk ] )
+ sj1*( si2*read[read_ijk-2-dj +dk ] + si1*read[read_ijk-1-dj +dk ] + read[read_ijk-dj +dk ] - si1*read[read_ijk+1-dj +dk ] - si2*read[read_ijk+2-dj +dk ] )
+ ( si2*read[read_ijk-2 +dk ] + si1*read[read_ijk-1 +dk ] + read[read_ijk +dk ] - si1*read[read_ijk+1 +dk ] - si2*read[read_ijk+2 +dk ] )
- sj1*( si2*read[read_ijk-2+dj +dk ] + si1*read[read_ijk-1+dj +dk ] + read[read_ijk+dj +dk ] - si1*read[read_ijk+1+dj +dk ] - si2*read[read_ijk+2+dj +dk ] )
- sj2*( si2*read[read_ijk-2+dj2+dk ] + si1*read[read_ijk-1+dj2+dk ] + read[read_ijk+dj2+dk ] - si1*read[read_ijk+1+dj2+dk ] - si2*read[read_ijk+2+dj2+dk ] ) )
- sk2*( + sj2*( si2*read[read_ijk-2-dj2+dk2] + si1*read[read_ijk-1-dj2+dk2] + read[read_ijk-dj2+dk2] - si1*read[read_ijk+1-dj2+dk2] - si2*read[read_ijk+2-dj2+dk2] )
+ sj1*( si2*read[read_ijk-2-dj +dk2] + si1*read[read_ijk-1-dj +dk2] + read[read_ijk-dj +dk2] - si1*read[read_ijk+1-dj +dk2] - si2*read[read_ijk+2-dj +dk2] )
+ ( si2*read[read_ijk-2 +dk2] + si1*read[read_ijk-1 +dk2] + read[read_ijk +dk2] - si1*read[read_ijk+1 +dk2] - si2*read[read_ijk+2 +dk2] )
- sj1*( si2*read[read_ijk-2+dj +dk2] + si1*read[read_ijk-1+dj +dk2] + read[read_ijk+dj +dk2] - si1*read[read_ijk+1+dj +dk2] - si2*read[read_ijk+2+dj +dk2] )
- sj2*( si2*read[read_ijk-2+dj2+dk2] + si1*read[read_ijk-1+dj2+dk2] + read[read_ijk+dj2+dk2] - si1*read[read_ijk+1+dj2+dk2] - si2*read[read_ijk+2+dj2+dk2] ) );
}}}
#else
// exploit tensor product symmetry and perform 8 fine grid interpolations at a time...
// 50 x 5pt for i
// 20 x 5pt for j
// 8 x 5pt for k
// ----------------
// 78 x 5pt for 8 cells (vs 8x125pt = 200x5pt in naive)
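// (bookkeeping: stage 1 applies the 5pt stencil in i at 2 fine i-points for
//  all 5x5=25 coarse (j,k) pencils -> 50; stage 2 applies it in j at 2x2=4
//  fine (i,j) points for 5 coarse k-planes -> 20; stage 3 applies it in k
//  for all 2x2x2=8 fine points -> 8; 50+20+8 = 78.)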
int i,j,k;
int ii,jj,kk;
double c2 = -3.0/128.0;
double c1 = 22.0/128.0;
int dj = read_jStride;
int dk = read_kStride;
int dj2 = 2*read_jStride;
int dk2 = 2*read_kStride;
for(k=0,kk=0;k<write_dim_k;k+=2,kk++){
for(j=0,jj=0;j<write_dim_j;j+=2,jj++){
// compiler cannot infer/speculate write[ijk+write_jStride] is disjoint from write[ijk], so create a unique restrict pointer for each nonliteral offset...
double * __restrict__ write00 = write + write_i + (write_j+j+0)*write_jStride + (write_k+k+0)*write_kStride;
double * __restrict__ write10 = write + write_i + (write_j+j+1)*write_jStride + (write_k+k+0)*write_kStride;
double * __restrict__ write01 = write + write_i + (write_j+j+0)*write_jStride + (write_k+k+1)*write_kStride;
double * __restrict__ write11 = write + write_i + (write_j+j+1)*write_jStride + (write_k+k+1)*write_kStride;
for(i=0,ii=0;i<write_dim_i;i+=2,ii++){
int write_ijk = ( i+write_i) + ( j+write_j)*write_jStride + ( k+write_k)*write_kStride;
int read_ijk = (ii+ read_i) + (jj+ read_j)* read_jStride + (kk+ read_k)* read_kStride;
//
// | -3/128 | +22/128 | 1.0 | -22/128 | +3/128 | coarse grid
// |-----+-----|-----+-----|-----+-----|-----+-----|-----+-----|
// | | | | |?????| | | | | | fine grid
//
// grab all coarse grid points...
const double c000=read[read_ijk-2-dj2-dk2], c100=read[read_ijk-1-dj2-dk2], c200=read[read_ijk-dj2-dk2], c300=read[read_ijk+1-dj2-dk2], c400=read[read_ijk+2-dj2-dk2];
const double c010=read[read_ijk-2-dj -dk2], c110=read[read_ijk-1-dj -dk2], c210=read[read_ijk-dj -dk2], c310=read[read_ijk+1-dj -dk2], c410=read[read_ijk+2-dj -dk2];
const double c020=read[read_ijk-2 -dk2], c120=read[read_ijk-1 -dk2], c220=read[read_ijk -dk2], c320=read[read_ijk+1 -dk2], c420=read[read_ijk+2 -dk2];
const double c030=read[read_ijk-2+dj -dk2], c130=read[read_ijk-1+dj -dk2], c230=read[read_ijk+dj -dk2], c330=read[read_ijk+1+dj -dk2], c430=read[read_ijk+2+dj -dk2];
const double c040=read[read_ijk-2+dj2-dk2], c140=read[read_ijk-1+dj2-dk2], c240=read[read_ijk+dj2-dk2], c340=read[read_ijk+1+dj2-dk2], c440=read[read_ijk+2+dj2-dk2];
const double c001=read[read_ijk-2-dj2-dk ], c101=read[read_ijk-1-dj2-dk ], c201=read[read_ijk-dj2-dk ], c301=read[read_ijk+1-dj2-dk ], c401=read[read_ijk+2-dj2-dk ];
const double c011=read[read_ijk-2-dj -dk ], c111=read[read_ijk-1-dj -dk ], c211=read[read_ijk-dj -dk ], c311=read[read_ijk+1-dj -dk ], c411=read[read_ijk+2-dj -dk ];
const double c021=read[read_ijk-2 -dk ], c121=read[read_ijk-1 -dk ], c221=read[read_ijk -dk ], c321=read[read_ijk+1 -dk ], c421=read[read_ijk+2 -dk ];
const double c031=read[read_ijk-2+dj -dk ], c131=read[read_ijk-1+dj -dk ], c231=read[read_ijk+dj -dk ], c331=read[read_ijk+1+dj -dk ], c431=read[read_ijk+2+dj -dk ];
const double c041=read[read_ijk-2+dj2-dk ], c141=read[read_ijk-1+dj2-dk ], c241=read[read_ijk+dj2-dk ], c341=read[read_ijk+1+dj2-dk ], c441=read[read_ijk+2+dj2-dk ];
const double c002=read[read_ijk-2-dj2 ], c102=read[read_ijk-1-dj2 ], c202=read[read_ijk-dj2 ], c302=read[read_ijk+1-dj2 ], c402=read[read_ijk+2-dj2 ];
const double c012=read[read_ijk-2-dj ], c112=read[read_ijk-1-dj ], c212=read[read_ijk-dj ], c312=read[read_ijk+1-dj ], c412=read[read_ijk+2-dj ];
const double c022=read[read_ijk-2 ], c122=read[read_ijk-1 ], c222=read[read_ijk ], c322=read[read_ijk+1 ], c422=read[read_ijk+2 ];
const double c032=read[read_ijk-2+dj ], c132=read[read_ijk-1+dj ], c232=read[read_ijk+dj ], c332=read[read_ijk+1+dj ], c432=read[read_ijk+2+dj ];
const double c042=read[read_ijk-2+dj2 ], c142=read[read_ijk-1+dj2 ], c242=read[read_ijk+dj2 ], c342=read[read_ijk+1+dj2 ], c442=read[read_ijk+2+dj2 ];
const double c003=read[read_ijk-2-dj2+dk ], c103=read[read_ijk-1-dj2+dk ], c203=read[read_ijk-dj2+dk ], c303=read[read_ijk+1-dj2+dk ], c403=read[read_ijk+2-dj2+dk ];
const double c013=read[read_ijk-2-dj +dk ], c113=read[read_ijk-1-dj +dk ], c213=read[read_ijk-dj +dk ], c313=read[read_ijk+1-dj +dk ], c413=read[read_ijk+2-dj +dk ];
const double c023=read[read_ijk-2 +dk ], c123=read[read_ijk-1 +dk ], c223=read[read_ijk +dk ], c323=read[read_ijk+1 +dk ], c423=read[read_ijk+2 +dk ];
const double c033=read[read_ijk-2+dj +dk ], c133=read[read_ijk-1+dj +dk ], c233=read[read_ijk+dj +dk ], c333=read[read_ijk+1+dj +dk ], c433=read[read_ijk+2+dj +dk ];
const double c043=read[read_ijk-2+dj2+dk ], c143=read[read_ijk-1+dj2+dk ], c243=read[read_ijk+dj2+dk ], c343=read[read_ijk+1+dj2+dk ], c443=read[read_ijk+2+dj2+dk ];
const double c004=read[read_ijk-2-dj2+dk2], c104=read[read_ijk-1-dj2+dk2], c204=read[read_ijk-dj2+dk2], c304=read[read_ijk+1-dj2+dk2], c404=read[read_ijk+2-dj2+dk2];
const double c014=read[read_ijk-2-dj +dk2], c114=read[read_ijk-1-dj +dk2], c214=read[read_ijk-dj +dk2], c314=read[read_ijk+1-dj +dk2], c414=read[read_ijk+2-dj +dk2];
const double c024=read[read_ijk-2 +dk2], c124=read[read_ijk-1 +dk2], c224=read[read_ijk +dk2], c324=read[read_ijk+1 +dk2], c424=read[read_ijk+2 +dk2];
const double c034=read[read_ijk-2+dj +dk2], c134=read[read_ijk-1+dj +dk2], c234=read[read_ijk+dj +dk2], c334=read[read_ijk+1+dj +dk2], c434=read[read_ijk+2+dj +dk2];
const double c044=read[read_ijk-2+dj2+dk2], c144=read[read_ijk-1+dj2+dk2], c244=read[read_ijk+dj2+dk2], c344=read[read_ijk+1+dj2+dk2], c444=read[read_ijk+2+dj2+dk2];
// interpolate in i to create fine i / coarse jk points...
const double f0c00 = ( c200 + c1*(c100-c300) + c2*(c000-c400) ); // same as original 5pt stencil... f0c00 = ( c2*c000 + c1*c100 + c200 - c1*c300 - c2*c400 )
const double f1c00 = ( c200 - c1*(c100-c300) - c2*(c000-c400) );
const double f0c10 = ( c210 + c1*(c110-c310) + c2*(c010-c410) );
const double f1c10 = ( c210 - c1*(c110-c310) - c2*(c010-c410) );
const double f0c20 = ( c220 + c1*(c120-c320) + c2*(c020-c420) );
const double f1c20 = ( c220 - c1*(c120-c320) - c2*(c020-c420) );
const double f0c30 = ( c230 + c1*(c130-c330) + c2*(c030-c430) );
const double f1c30 = ( c230 - c1*(c130-c330) - c2*(c030-c430) );
const double f0c40 = ( c240 + c1*(c140-c340) + c2*(c040-c440) );
const double f1c40 = ( c240 - c1*(c140-c340) - c2*(c040-c440) );
const double f0c01 = ( c201 + c1*(c101-c301) + c2*(c001-c401) );
const double f1c01 = ( c201 - c1*(c101-c301) - c2*(c001-c401) );
const double f0c11 = ( c211 + c1*(c111-c311) + c2*(c011-c411) );
const double f1c11 = ( c211 - c1*(c111-c311) - c2*(c011-c411) );
const double f0c21 = ( c221 + c1*(c121-c321) + c2*(c021-c421) );
const double f1c21 = ( c221 - c1*(c121-c321) - c2*(c021-c421) );
const double f0c31 = ( c231 + c1*(c131-c331) + c2*(c031-c431) );
const double f1c31 = ( c231 - c1*(c131-c331) - c2*(c031-c431) );
const double f0c41 = ( c241 + c1*(c141-c341) + c2*(c041-c441) );
const double f1c41 = ( c241 - c1*(c141-c341) - c2*(c041-c441) );
const double f0c02 = ( c202 + c1*(c102-c302) + c2*(c002-c402) );
const double f1c02 = ( c202 - c1*(c102-c302) - c2*(c002-c402) );
const double f0c12 = ( c212 + c1*(c112-c312) + c2*(c012-c412) );
const double f1c12 = ( c212 - c1*(c112-c312) - c2*(c012-c412) );
const double f0c22 = ( c222 + c1*(c122-c322) + c2*(c022-c422) );
const double f1c22 = ( c222 - c1*(c122-c322) - c2*(c022-c422) );
const double f0c32 = ( c232 + c1*(c132-c332) + c2*(c032-c432) );
const double f1c32 = ( c232 - c1*(c132-c332) - c2*(c032-c432) );
const double f0c42 = ( c242 + c1*(c142-c342) + c2*(c042-c442) );
const double f1c42 = ( c242 - c1*(c142-c342) - c2*(c042-c442) );
const double f0c03 = ( c203 + c1*(c103-c303) + c2*(c003-c403) );
const double f1c03 = ( c203 - c1*(c103-c303) - c2*(c003-c403) );
const double f0c13 = ( c213 + c1*(c113-c313) + c2*(c013-c413) );
const double f1c13 = ( c213 - c1*(c113-c313) - c2*(c013-c413) );
const double f0c23 = ( c223 + c1*(c123-c323) + c2*(c023-c423) );
const double f1c23 = ( c223 - c1*(c123-c323) - c2*(c023-c423) );
const double f0c33 = ( c233 + c1*(c133-c333) + c2*(c033-c433) );
const double f1c33 = ( c233 - c1*(c133-c333) - c2*(c033-c433) );
const double f0c43 = ( c243 + c1*(c143-c343) + c2*(c043-c443) );
const double f1c43 = ( c243 - c1*(c143-c343) - c2*(c043-c443) );
const double f0c04 = ( c204 + c1*(c104-c304) + c2*(c004-c404) );
const double f1c04 = ( c204 - c1*(c104-c304) - c2*(c004-c404) );
const double f0c14 = ( c214 + c1*(c114-c314) + c2*(c014-c414) );
const double f1c14 = ( c214 - c1*(c114-c314) - c2*(c014-c414) );
const double f0c24 = ( c224 + c1*(c124-c324) + c2*(c024-c424) );
const double f1c24 = ( c224 - c1*(c124-c324) - c2*(c024-c424) );
const double f0c34 = ( c234 + c1*(c134-c334) + c2*(c034-c434) );
const double f1c34 = ( c234 - c1*(c134-c334) - c2*(c034-c434) );
const double f0c44 = ( c244 + c1*(c144-c344) + c2*(c044-c444) );
const double f1c44 = ( c244 - c1*(c144-c344) - c2*(c044-c444) );
// interpolate in j to create fine ij / coarse k points...
const double f00c0 = (f0c20 + c1*(f0c10-f0c30) + c2*(f0c00-f0c40) );
const double f10c0 = (f1c20 + c1*(f1c10-f1c30) + c2*(f1c00-f1c40) );
const double f01c0 = (f0c20 - c1*(f0c10-f0c30) - c2*(f0c00-f0c40) );
const double f11c0 = (f1c20 - c1*(f1c10-f1c30) - c2*(f1c00-f1c40) );
const double f00c1 = (f0c21 + c1*(f0c11-f0c31) + c2*(f0c01-f0c41) );
const double f10c1 = (f1c21 + c1*(f1c11-f1c31) + c2*(f1c01-f1c41) );
const double f01c1 = (f0c21 - c1*(f0c11-f0c31) - c2*(f0c01-f0c41) );
const double f11c1 = (f1c21 - c1*(f1c11-f1c31) - c2*(f1c01-f1c41) );
const double f00c2 = (f0c22 + c1*(f0c12-f0c32) + c2*(f0c02-f0c42) );
const double f10c2 = (f1c22 + c1*(f1c12-f1c32) + c2*(f1c02-f1c42) );
const double f01c2 = (f0c22 - c1*(f0c12-f0c32) - c2*(f0c02-f0c42) );
const double f11c2 = (f1c22 - c1*(f1c12-f1c32) - c2*(f1c02-f1c42) );
const double f00c3 = (f0c23 + c1*(f0c13-f0c33) + c2*(f0c03-f0c43) );
const double f10c3 = (f1c23 + c1*(f1c13-f1c33) + c2*(f1c03-f1c43) );
const double f01c3 = (f0c23 - c1*(f0c13-f0c33) - c2*(f0c03-f0c43) );
const double f11c3 = (f1c23 - c1*(f1c13-f1c33) - c2*(f1c03-f1c43) );
const double f00c4 = (f0c24 + c1*(f0c14-f0c34) + c2*(f0c04-f0c44) );
const double f10c4 = (f1c24 + c1*(f1c14-f1c34) + c2*(f1c04-f1c44) );
const double f01c4 = (f0c24 - c1*(f0c14-f0c34) - c2*(f0c04-f0c44) );
const double f11c4 = (f1c24 - c1*(f1c14-f1c34) - c2*(f1c04-f1c44) );
// interpolate in k to create fine ijk points...
const double f000 = (f00c2 + c1*(f00c1-f00c3) + c2*(f00c0-f00c4) );
const double f100 = (f10c2 + c1*(f10c1-f10c3) + c2*(f10c0-f10c4) );
const double f010 = (f01c2 + c1*(f01c1-f01c3) + c2*(f01c0-f01c4) );
const double f110 = (f11c2 + c1*(f11c1-f11c3) + c2*(f11c0-f11c4) );
const double f001 = (f00c2 - c1*(f00c1-f00c3) - c2*(f00c0-f00c4) );
const double f101 = (f10c2 - c1*(f10c1-f10c3) - c2*(f10c0-f10c4) );
const double f011 = (f01c2 - c1*(f01c1-f01c3) - c2*(f01c0-f01c4) );
const double f111 = (f11c2 - c1*(f11c1-f11c3) - c2*(f11c0-f11c4) );
// commit to memory...
#if 0 // compiler cannot infer/speculate write[ijk+write_jStride] is disjoint from write[ijk], and thus cannot vectorize...
write[write_ijk ] = prescale_f*write[write_ijk ] + f000;
write[write_ijk+1 ] = prescale_f*write[write_ijk+1 ] + f100;
write[write_ijk +write_jStride ] = prescale_f*write[write_ijk +write_jStride ] + f010;
write[write_ijk+1+write_jStride ] = prescale_f*write[write_ijk+1+write_jStride ] + f110;
write[write_ijk +write_kStride] = prescale_f*write[write_ijk +write_kStride] + f001;
write[write_ijk+1 +write_kStride] = prescale_f*write[write_ijk+1 +write_kStride] + f101;
write[write_ijk +write_jStride+write_kStride] = prescale_f*write[write_ijk +write_jStride+write_kStride] + f011;
write[write_ijk+1+write_jStride+write_kStride] = prescale_f*write[write_ijk+1+write_jStride+write_kStride] + f111;
#else // use a unique restrict pointer for each pencil...
write00[i ] = prescale_f*write00[i ] + f000;
write00[i+1] = prescale_f*write00[i+1] + f100;
write10[i ] = prescale_f*write10[i ] + f010;
write10[i+1] = prescale_f*write10[i+1] + f110;
write01[i ] = prescale_f*write01[i ] + f001;
write01[i+1] = prescale_f*write01[i+1] + f101;
write11[i ] = prescale_f*write11[i ] + f011;
write11[i+1] = prescale_f*write11[i+1] + f111;
#endif
}}}
#endif
}
//------------------------------------------------------------------------------------------------------------------------------
// performs an (inter-level) volumetric quartic interpolation on vector id_c of the coarse level
// and accumulates the result into prescale_f times vector id_f on the fine level,
// i.e. id_f = prescale_f*id_f + P*id_c
// prescale_f is nominally 1.0 or 0.0
// quartic interpolation requires a full ghost zone exchange and boundary condition enforcement
// This is a rather bulk-synchronous implementation: it packs all MPI buffers before initiating any sends.
// Similarly, it waits for all remote data before copying any into local boxes.
// It does, however, attempt to overlap local interpolation with MPI communication.
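// Overall flow: (1) prepost Irecvs, (2) pack MPI send buffers (blocks[0],
// prescale=0), (3) post Isends, (4) interpolate local blocks (blocks[1],
// overlapped with communication), (5) Waitall, (6) unpack received buffers
// (blocks[2]) into the fine-level boxes.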
void interpolation_v4(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
exchange_boundary(level_c,id_c,STENCIL_SHAPE_BOX);
apply_BCs_v4(level_c,id_c,STENCIL_SHAPE_BOX);
double _timeCommunicationStart = getTime();
double _timeStart,_timeEnd;
int buffer=0;
int n;
int my_tag = (level_f->tag<<4) | 0x7;
#ifdef USE_MPI
// by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
MPI_Request *recv_requests = level_f->interpolation.requests;
MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;
// loop through packed list of MPI receives and prepost Irecv's...
if(level_f->interpolation.num_recvs>0){
_timeStart = getTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_f->interpolation.num_recvs;n++){
MPI_Irecv(level_f->interpolation.recv_buffers[n],
level_f->interpolation.recv_sizes[n],
MPI_DOUBLE,
level_f->interpolation.recv_ranks[n],
my_tag,
MPI_COMM_WORLD,
&recv_requests[n]
);
}
_timeEnd = getTime();
level_f->timers.interpolation_recv += (_timeEnd-_timeStart);
}
// pack MPI send buffers...
if(level_c->interpolation.num_blocks[0]>0){
_timeStart = getTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[0])
for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){
// !!! prescale==0 because you don't want to increment the MPI buffer
interpolation_v4_block(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);
}
_timeEnd = getTime();
level_f->timers.interpolation_pack += (_timeEnd-_timeStart);
}
// loop through MPI send buffers and post Isend's...
if(level_c->interpolation.num_sends>0){
_timeStart = getTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_c->interpolation.num_sends;n++){
MPI_Isend(level_c->interpolation.send_buffers[n],
level_c->interpolation.send_sizes[n],
MPI_DOUBLE,
level_c->interpolation.send_ranks[n],
my_tag,
MPI_COMM_WORLD,
&send_requests[n]
);
}
_timeEnd = getTime();
level_f->timers.interpolation_send += (_timeEnd-_timeStart);
}
#endif
// perform local interpolation... try and hide within Isend latency...
if(level_c->interpolation.num_blocks[1]>0){
_timeStart = getTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[1])
for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){
interpolation_v4_block(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);
}
_timeEnd = getTime();
level_f->timers.interpolation_local += (_timeEnd-_timeStart);
}
// wait for MPI to finish...
#ifdef USE_MPI
if(nMessages>0){
_timeStart = getTime();
MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
_timeEnd = getTime();
level_f->timers.interpolation_wait += (_timeEnd-_timeStart);
}
// unpack MPI receive buffers
if(level_f->interpolation.num_blocks[2]>0){
_timeStart = getTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->interpolation.num_blocks[2])
for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){
IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);
}
_timeEnd = getTime();
level_f->timers.interpolation_unpack += (_timeEnd-_timeStart);
}
#endif
level_f->timers.interpolation_total += (double)(getTime()-_timeCommunicationStart);
}
|
GB_apply_op.c | //------------------------------------------------------------------------------
// GB_apply_op: typecast and apply a unary or binary operator to an array
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Cx = op (A)
// Cx and A->x may be aliased.
// This function is CSR/CSC agnostic. For positional ops, A is treated as if
// it is in CSC format. The caller has already modified the op if A is in CSR
// format.
// Template/GB_positional_op_ijp can return GrB_OUT_OF_MEMORY.
// Otherwise, this function only returns GrB_SUCCESS.
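// For a unary apply, callers pass op2 == NULL; a hedged sketch (the
// surrounding matrix plumbing is elided):
//      GB_apply_op (Cx, GrB_LNOT, NULL, NULL, false, A, Context) ;
// For a binary apply with a bound scalar, op1 == NULL and binop_bind1st
// selects between op2(scalar,Ax) and op2(Ax,scalar).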
#include "GB_apply.h"
#include "GB_binop.h"
#include "GB_ek_slice.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_unop__include.h"
#include "GB_binop__include.h"
#endif
GrB_Info GB_apply_op // apply a unary operator, Cx = op (A)
(
GB_void *Cx, // output array, of type op->ztype
const GrB_UnaryOp op1, // unary operator to apply
const GrB_BinaryOp op2, // binary operator to apply
const GxB_Scalar scalar, // scalar to bind to binary operator
bool binop_bind1st, // if true, binop(x,Ax) else binop(Ax,y)
const GrB_Matrix A, // input matrix
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (Cx != NULL) ;
ASSERT (op1 != NULL || op2 != NULL) ;
ASSERT_MATRIX_OK (A, "A input for GB_apply_op", GB0) ;
ASSERT (GB_JUMBLED_OK (A)) ; // A can be jumbled
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
// A->x is not const since the operator might be applied in-place, if
// Cx is aliased to A->x.
GB_void *Ax = (GB_void *) A->x ; // A->x has type A->type
const int8_t *Ab = A->b ; // only if A is bitmap
const GrB_Type Atype = A->type ; // type of A->x
const int64_t anz = GB_NNZ_HELD (A) ; // size of A->x and Cx
//--------------------------------------------------------------------------
// determine the maximum number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// apply the operator
//--------------------------------------------------------------------------
GB_Opcode opcode = (op1 != NULL) ? op1->opcode : op2->opcode ;
if (GB_OPCODE_IS_POSITIONAL (opcode))
{
//----------------------------------------------------------------------
// built-in positional unary or binary operator
//----------------------------------------------------------------------
bool is64 ;
if (op1 != NULL)
{
ASSERT_UNARYOP_OK (op1, "positional op1 for GB_apply_op", GB0) ;
is64 = (op1->ztype == GrB_INT64) ;
}
else // if (op2 != NULL)
{
ASSERT_BINARYOP_OK (op2, "positional op2 for GB_apply_op", GB0) ;
is64 = (op2->ztype == GrB_INT64) ;
}
// get A and C
const int64_t *GB_RESTRICT Ah = A->h ;
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ai = A->i ;
int64_t anvec = A->nvec ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
//----------------------------------------------------------------------
// determine number of threads to use
//----------------------------------------------------------------------
int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;
int ntasks = (nthreads == 1) ? 1 : (32 * nthreads) ;
//----------------------------------------------------------------------
// Cx = positional_op (A)
//----------------------------------------------------------------------
int64_t offset = GB_positional_offset (opcode) ;
// GB_positional_op_ijp allocates a set of tasks, which can possibly
// fail if out of memory.
if (is64)
{
int64_t *GB_RESTRICT Cx_int = (int64_t *) Cx ;
switch (opcode)
{
case GB_POSITIONI_opcode : // z = position_i(A(i,j)) == i
case GB_POSITIONI1_opcode : // z = position_i1(A(i,j)) == i+1
case GB_FIRSTI_opcode : // z = first_i(A(i,j),y) == i
case GB_FIRSTI1_opcode : // z = first_i1(A(i,j),y) == i+1
case GB_SECONDI_opcode : // z = second_i(x,A(i,j)) == i
case GB_SECONDI1_opcode : // z = second_i1(x,A(i,j)) == i+1
#define GB_POSITION i + offset
#include "GB_positional_op_ip.c"
return (GrB_SUCCESS) ;
case GB_POSITIONJ_opcode : // z = position_j(A(i,j)) == j
case GB_POSITIONJ1_opcode : // z = position_j1(A(i,j)) == j+1
case GB_FIRSTJ_opcode : // z = first_j(A(i,j),y) == j
case GB_FIRSTJ1_opcode : // z = first_j1(A(i,j),y) == j+1
case GB_SECONDJ_opcode : // z = second_j(x,A(i,j)) == j
case GB_SECONDJ1_opcode : // z = second_j1(x,A(i,j)) == j+1
#define GB_POSITION j + offset
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
default: ;
}
}
else
{
int32_t *GB_RESTRICT Cx_int = (int32_t *) Cx ;
switch (opcode)
{
case GB_POSITIONI_opcode : // z = position_i(A(i,j)) == i
case GB_POSITIONI1_opcode : // z = position_i1(A(i,j)) == i+1
case GB_FIRSTI_opcode : // z = first_i(A(i,j),y) == i
case GB_FIRSTI1_opcode : // z = first_i1(A(i,j),y) == i+1
case GB_SECONDI_opcode : // z = second_i(x,A(i,j)) == i
case GB_SECONDI1_opcode : // z = second_i1(x,A(i,j)) == i+1
#define GB_POSITION (int32_t) (i + offset)
#include "GB_positional_op_ip.c"
return (GrB_SUCCESS) ;
case GB_POSITIONJ_opcode : // z = position_j(A(i,j)) == j
case GB_POSITIONJ1_opcode : // z = position_j1(A(i,j)) == j+1
case GB_FIRSTJ_opcode : // z = first_j(A(i,j),y) == j
case GB_FIRSTJ1_opcode : // z = first_j1(A(i,j),y) == j+1
case GB_SECONDJ_opcode : // z = second_j(x,A(i,j)) == j
case GB_SECONDJ1_opcode : // z = second_j1(x,A(i,j)) == j+1
#define GB_POSITION (int32_t) (j + offset)
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
default: ;
}
}
}
else if (op1 != NULL)
{
//----------------------------------------------------------------------
// unary operator
//----------------------------------------------------------------------
ASSERT_UNARYOP_OK (op1, "op1 for GB_apply_op", GB0) ;
// determine number of threads to use
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
GrB_UnaryOp op = op1 ;
#ifndef GBCOMPACT
if ((Atype == op->xtype)
|| (opcode == GB_IDENTITY_opcode) || (opcode == GB_ONE_opcode))
{
// The switch factory is used if the op is IDENTITY or ONE, or if
// no typecasting is being done. The ONE operator ignores the type
// of its input and just produces a 1 of op->ztype == op->xtype.
// The IDENTITY operator can do arbitrary typecasting.
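// For example, GrB_IDENTITY_FP64 applied to an int32 matrix still hits a
// hard-coded worker that typecasts each int32_t entry to double.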
//------------------------------------------------------------------
// define the worker for the switch factory
//------------------------------------------------------------------
#define GB_unop_apply(op,zname,aname) \
GB_unop_apply_ ## op ## zname ## aname
#define GB_WORKER(op,zname,ztype,aname,atype) \
{ \
if (GB_unop_apply (op,zname,aname) ((ztype *) Cx, \
(const atype *) Ax, Ab, anz, nthreads) \
== GrB_SUCCESS) return (GrB_SUCCESS) ; \
} \
break ;
//------------------------------------------------------------------
// launch the switch factory
//------------------------------------------------------------------
#include "GB_unop_factory.c"
}
#endif
//----------------------------------------------------------------------
// generic worker: typecast and apply a unary operator
//----------------------------------------------------------------------
GB_BURBLE_N (anz, "(generic apply: %s) ", op->name) ;
size_t asize = Atype->size ;
size_t zsize = op->ztype->size ;
size_t xsize = op->xtype->size ;
GB_cast_function
cast_A_to_X = GB_cast_factory (op->xtype->code, Atype->code) ;
GxB_unary_function fop = op->function ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// xwork = (xtype) Ax [p]
GB_void xwork [GB_VLA(xsize)] ;
cast_A_to_X (xwork, Ax +(p*asize), asize) ;
// Cx [p] = fop (xwork)
fop (Cx +(p*zsize), xwork) ;
}
}
else
{
//----------------------------------------------------------------------
// binary operator
//----------------------------------------------------------------------
ASSERT_BINARYOP_OK (op2, "standard op2 for GB_apply_op", GB0) ;
ASSERT_SCALAR_OK (scalar, "scalar for GB_apply_op", GB0) ;
// determine number of threads to use
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
GB_Type_code xcode, ycode, zcode ;
bool op_is_first = (opcode == GB_FIRST_opcode) ;
bool op_is_second = (opcode == GB_SECOND_opcode) ;
bool op_is_pair = (opcode == GB_PAIR_opcode) ;
size_t asize = Atype->size ;
size_t ssize = scalar->type->size ;
size_t zsize = op2->ztype->size ;
size_t xsize = op2->xtype->size ;
size_t ysize = op2->ytype->size ;
GB_Type_code scode = scalar->type->code ;
xcode = op2->xtype->code ;
ycode = op2->ytype->code ;
// typecast the scalar to the operator input
bool ignore_scalar = false ;
size_t ssize_cast ;
GB_Type_code scode_cast ;
if (binop_bind1st)
{
ssize_cast = xsize ;
scode_cast = xcode ;
ignore_scalar = op_is_second || op_is_pair ;
}
else
{
ssize_cast = ysize ;
scode_cast = ycode ;
ignore_scalar = op_is_first || op_is_pair ;
}
GB_void swork [GB_VLA(ssize_cast)] ;
GB_void *scalarx = (GB_void *) scalar->x ;
if (scode_cast != scode && !ignore_scalar)
{
// typecast the scalar to the operator input, in swork
GB_cast_function cast_s = GB_cast_factory (scode_cast, scode) ;
cast_s (swork, scalar->x, ssize) ;
scalarx = swork ;
}
#ifndef GBCOMPACT
if (binop_bind1st)
{
//--------------------------------------------------------------
// z = op(scalar,Ax)
//--------------------------------------------------------------
if (GB_binop_builtin (
op2->xtype, ignore_scalar,
Atype, op_is_first || op_is_pair,
op2, false, &opcode, &xcode, &ycode, &zcode))
{
//----------------------------------------------------------
// define the worker for the switch factory
//----------------------------------------------------------
#define GB_bind1st(op,xname) GB_bind1st_ ## op ## xname
#define GB_BINOP_WORKER(op,xname) \
{ \
if (GB_bind1st (op, xname) (Cx, scalarx, Ax, Ab, anz,\
nthreads) == GrB_SUCCESS) return (GrB_SUCCESS) ; \
} \
break ;
//----------------------------------------------------------
// launch the switch factory
//----------------------------------------------------------
#define GB_NO_SECOND
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
}
else
{
//--------------------------------------------------------------
// z = op(Ax,scalar)
//--------------------------------------------------------------
if (GB_binop_builtin (
Atype, op_is_second || op_is_pair,
op2->ytype, ignore_scalar,
op2, false, &opcode, &xcode, &ycode, &zcode))
{
//----------------------------------------------------------
// define the worker for the switch factory
//----------------------------------------------------------
#define GB_bind2nd(op,xname) GB_bind2nd_ ## op ## xname
#undef GB_BINOP_WORKER
#define GB_BINOP_WORKER(op,xname) \
{ \
if (GB_bind2nd (op, xname) (Cx, Ax, scalarx, Ab, anz,\
nthreads) == GrB_SUCCESS) return (GrB_SUCCESS) ; \
} \
break ;
//----------------------------------------------------------
// launch the switch factory
//----------------------------------------------------------
#define GB_NO_FIRST
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
}
#endif
//----------------------------------------------------------------------
// generic worker: typecast and apply a binary operator
//----------------------------------------------------------------------
GB_BURBLE_N (anz, "(generic apply: %s) ", op2->name) ;
GB_Type_code acode = Atype->code ;
GxB_binary_function fop = op2->function ;
if (binop_bind1st)
{
// Cx = op (scalar,Ax)
GB_cast_function cast_A_to_Y = GB_cast_factory (ycode, acode) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// ywork = (ytype) Ax [p]
GB_void ywork [GB_VLA(ysize)] ;
cast_A_to_Y (ywork, Ax +(p*asize), asize) ;
// Cx [p] = fop (scalarx, ywork)
fop (Cx +(p*zsize), scalarx, ywork) ;
}
}
else
{
// Cx = op (Ax,scalar)
GB_cast_function cast_A_to_X = GB_cast_factory (xcode, acode) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// xwork = (xtype) Ax [p]
GB_void xwork [GB_VLA(xsize)] ;
cast_A_to_X (xwork, Ax +(p*asize), asize) ;
// Cx [p] = fop (xwork, scalarx)
fop (Cx +(p*zsize), xwork, scalarx) ;
}
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
return (GrB_SUCCESS) ;
}
|
GB_unop__isfinite_bool_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isfinite_bool_fc32)
// op(A') function: GB (_unop_tran__isfinite_bool_fc32)
// C type: bool
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = GB_cisfinitef (aij)
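// (GB_cisfinitef treats a complex value as finite when both of its parts
//  are; conceptually isfinite (crealf (aij)) && isfinite (cimagf (aij)).)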
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cisfinitef (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = (aij) ; \
Cx [pC] = GB_cisfinitef (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__isfinite_bool_fc32)
(
bool *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = GB_cisfinitef (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = GB_cisfinitef (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__isfinite_bool_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ExampleReservoirs_Shared.h | /**
* grove: ExampleReservoirs_Shared.h
* Copyright (c) Torr Vision Group, University of Oxford, 2017. All rights reserved.
*/
#ifndef H_GROVE_EXAMPLERESERVOIRS_SHARED
#define H_GROVE_EXAMPLERESERVOIRS_SHARED
#include <ORUtils/PlatformIndependence.h>
#define ALWAYS_ADD_EXAMPLES 0
namespace grove {
/**
* \brief Attempts to add an example to some reservoirs.
*
* If the example is valid, we attempt to add it to each specified reservoir. If a
* reservoir is not full, then the example is added. Otherwise, if ALWAYS_ADD_EXAMPLES
* is 1, a randomly-selected existing example is discarded and replaced by the current
* example. If ALWAYS_ADD_EXAMPLES is 0, then an additional random decision is made as
* to *whether* to replace an existing example.
*
* \param example The example to attempt to add to the reservoirs.
* \param reservoirIndices The indices of the reservoirs to which to attempt to add the example.
* \param reservoirIndexCount The number of reservoirs to which to attempt to add the example.
* \param reservoirs The example reservoirs: an image in which each row allows the storage of up to reservoirCapacity examples.
* \param reservoirSizes The current size of each reservoir.
* \param reservoirAddCalls The number of times the insertion of an example has been attempted for each reservoir.
* \param reservoirCapacity The capacity (maximum size) of each reservoir.
* \param randomGenerator A random number generator.
*/
template <typename ExampleType, typename RNGType>
_CPU_AND_GPU_CODE_TEMPLATE_
inline void add_example_to_reservoirs(const ExampleType& example, const int *reservoirIndices, uint32_t reservoirIndexCount,
ExampleType *reservoirs, int *reservoirSizes, int *reservoirAddCalls, uint32_t reservoirCapacity,
RNGType& randomGenerator)
{
// If the example is invalid, early out.
if(!example.valid) return;
// Try to add the example to each specified reservoir.
for(uint32_t i = 0; i < reservoirIndexCount; ++i)
{
// The reservoir index (this corresponds to a row in the reservoirs image).
const int reservoirIdx = reservoirIndices[i];
// The raster index (in the reservoirs image) of the first example in the reservoir.
const int reservoirStartIdx = reservoirIdx * reservoirCapacity;
// Get the total number of add calls that have ever been made for the current reservoir, and increment it for next time.
uint32_t oldAddCallsCount = 0;
#ifdef __CUDACC__
oldAddCallsCount = atomicAdd(&reservoirAddCalls[reservoirIdx], 1);
#else
#ifdef WITH_OPENMP
#pragma omp atomic capture
#endif
oldAddCallsCount = reservoirAddCalls[reservoirIdx]++;
#endif
// If the old total number of add calls is less than the reservoir's capacity, then we can immediately add the example.
// Otherwise, we need to decide whether or not to replace an existing example with this one.
if(oldAddCallsCount < reservoirCapacity)
{
// Store the example in the reservoir.
reservoirs[reservoirStartIdx + oldAddCallsCount] = example;
// Increment the reservoir's size. Note that it is not strictly necessary to
// maintain the reservoir sizes separately, since we can obtain the same
// information from reservoirAddCalls by clamping the values to the reservoir
// capacity, but writing it this way is much clearer and the cost in efficiency
// is limited in practice.
#ifdef __CUDACC__
atomicAdd(&reservoirSizes[reservoirIdx], 1);
#else
#ifdef WITH_OPENMP
#pragma omp atomic
#endif
++reservoirSizes[reservoirIdx];
#endif
}
else
{
#if ALWAYS_ADD_EXAMPLES
// Generate a random offset that will always result in an example being evicted from the reservoir.
const uint32_t randomOffset = randomGenerator.generate_int_from_uniform(0, reservoirCapacity - 1);
#else
// Generate a random offset that may or may not result in an example being evicted from the reservoir.
const uint32_t randomOffset = randomGenerator.generate_int_from_uniform(0, oldAddCallsCount - 1);
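      // (Assuming inclusive bounds, with n = oldAddCallsCount add calls before
      //  this one the offset is uniform over [0, n-1], so the new example is
      //  kept with probability reservoirCapacity / n, in the spirit of classic
      //  reservoir sampling, i.e. Vitter's Algorithm R.)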
#endif
// If the random offset corresponds to an example in the reservoir, replace that with the new example.
if(randomOffset < reservoirCapacity)
{
reservoirs[reservoirStartIdx + randomOffset] = example;
}
}
}
}
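// Example usage (a sketch; the concrete ExampleType must expose a `valid`
// flag and RNGType a generate_int_from_uniform(lo,hi) method, as assumed above):
//
//   int leafIndices[] = { 3, 7, 12 };
//   add_example_to_reservoirs(example, leafIndices, 3, reservoirs,
//                             reservoirSizes, reservoirAddCalls,
//                             /* reservoirCapacity = */ 1024, rng);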
}
#endif
|
schelude-clause-guide.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
int main(int argc, char **argv) {
int i, n=16,chunk,a[n],suma=0;
if(argc < 2) {
fprintf(stderr,"\nMissing iterations and/or chunk argument\n");
exit(-1);
}
//n = atoi(argv[1]);
if (n>20) n=20;
chunk = atoi(argv[1]);
for (i=0; i<n; i++) a[i] = i;
#pragma omp parallel for firstprivate(suma) \
lastprivate(suma) schedule(guided,chunk)
for (i=0; i<n; i++)
{
suma = suma + a[i];
printf(" thread %d suma a[%d]=%d suma=%d \n",
omp_get_thread_num(),i,a[i],suma);
}
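/* Note: suma is firstprivate+lastprivate, not a reduction, so the value
printed below is only the partial sum of the thread that executed the
sequentially last iteration (i = n-1); a reduction(+:suma) clause would
be needed to obtain the full sum 0+1+...+n-1. */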
printf("Fuera de 'parallel for' suma=%d\n",suma);
return 0;
}
|
AnisotropyFibreCase.h | #ifndef ANISOTROPYFIBRECASE_H
#define ANISOTROPYFIBRECASE_H
#include "geometry.h"
#include <cstdio>
#include <fstream>
#include "vec.h"
#include "randomnumber.h"
#include "distribution.h"
#include "cell.h"
#include "descriptor.h"
#include "block3D.h"
#include "fibreset.h"
#include "omp.h"
template <typename Descriptor>
class AnisotropyFibreCase
{
public:
AnisotropyFibreCase(int Grid_X, int Grid_Y, int Grid_Z,double voidage, double Grid_Radius, double beta_)
:fibreblock(Grid_X,Grid_Y,Grid_Z),dis(beta_){
setFibreSet(voidage,Grid_Radius);
printFibreBlock();
int sum=checkSolid();
double tempvoidage=1-(double)sum/fibreblock.getBoundingBox().nCells();
printf("%f\n",tempvoidage);
amendFibreSet(voidage,Grid_Radius);
printf("%d\n",fs.getN());
checkSurface();
exportFibreSetParameter();
exportRawData();
}
AnisotropyFibreCase(int Grid_X, int Grid_Y, int Grid_Z,double beta_,FibreSet fs_)
:fibreblock(Grid_X,Grid_Y,Grid_Z),dis(beta_),fs(fs_){
printFibreBlock();
checkSurface();
exportRawData();
}
AnisotropyFibreCase(int Grid_X, int Grid_Y, int Grid_Z,BlockLattice3D<Descriptor> lattice_):fibreblock(lattice_){
checkSurface();
}
~AnisotropyFibreCase() { }
void printFibreBlock() { // rasterises the fibre set into the lattice, marking solid cells
int Grid_X=fibreblock.getBoundingBox().getNx();
int Grid_Y=fibreblock.getBoundingBox().getNy();
int Grid_Z=fibreblock.getBoundingBox().getNz();
#pragma omp parallel for
for(int i = 0; i < Grid_X; i++){
for (int j = 0; j < Grid_Y; j++){
for(int k = 0; k < Grid_Z; k++){
Cell<Descriptor> modify=fibreblock.get(i,j,k);
for(int f=1;f<=fs.getN()&&modify[0]==0;f++){
modify[0]=contained(i,j,k,fs[f]);
}
fibreblock.get(i,j,k).attributeValues(modify);
}
}
}
}
void amendFibreSet(double voidage,double Grid_Radius){
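// Insert randomly generated fibres one at a time until the realised void
// fraction (1 - solid cells / total cells) falls to the target voidage;
// each new fibre only claims previously empty cells, and its solid-cell
// count is accumulated with an OpenMP reduction.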
int Grid_X=fibreblock.getBoundingBox().getNx();
int Grid_Y=fibreblock.getBoundingBox().getNy();
int Grid_Z=fibreblock.getBoundingBox().getNz();
int sum=checkSolid();
int totalN=fibreblock.getBoundingBox().nCells();
double tempvoidage=1-(double)sum/(double)totalN;
while(tempvoidage>voidage){
BetaFibre newfibre(fibreblock.getBoundingBox(),dis.getRhoMap(),1000,Grid_Radius,rng);
int tempsum=0;
#pragma omp parallel for reduction(+:tempsum)
for(int i = 0; i < Grid_X; i++){
for (int j = 0; j < Grid_Y; j++){
for(int k = 0; k < Grid_Z; k++){
Cell<Descriptor> modify=fibreblock.get(i,j,k);
if(modify[0]==0){
modify[0]=contained(i,j,k,newfibre);
fibreblock.get(i,j,k).attributeValues(modify);
tempsum+=modify[0];
}
}
}
}
sum+=tempsum;
tempvoidage=1-(double)sum/(double)totalN;
printf("%f\n",tempvoidage);
fs.insertFibre(newfibre);
}
}
void setFibreSet(double voidage,double Grid_Radius) {
int Grid_X=fibreblock.getBoundingBox().getNx();
int Grid_Y=fibreblock.getBoundingBox().getNy();
int Grid_Z=fibreblock.getBoundingBox().getNz();
double virtual_long = sqrt(pow(Grid_X, 2) + pow(Grid_Y, 2) + pow(Grid_Z, 2));
double onefibre = virtual_long*(pow(Grid_Radius, 2)*3.14159);
double volume = Grid_X*Grid_Y*Grid_Z*(1 - voidage);
//the predicted number of fibres needed: target solid volume over the volume of one diagonal-length fibre, with a 1.5x safety factor
int t_first = 1.5*(int)(volume / onefibre);
Box3D box=fibreblock.getBoundingBox();
for (int i = 0; i < t_first; i++){
fs.insertFibre(BetaFibre(box,dis.getRhoMap(),1000,Grid_Radius,rng));
}
}
BlockLattice3D<Descriptor> const& getBlockLattice() const{
return fibreblock;
}
AnisotropyDistribution const& getDistribution() const{
return dis;
}
FibreSet const& getFibreSet() const{
return fs;
}
void checkSurface(){
int Grid_X=fibreblock.getBoundingBox().getNx();
int Grid_Y=fibreblock.getBoundingBox().getNy();
int Grid_Z=fibreblock.getBoundingBox().getNz();
for (int i = 0; i < Grid_X; i++){
for (int j = 0; j < Grid_Y; j++){
for (int k = 0; k < Grid_Z; k++){
if (fibreblock.get(i,j,k)[0]==1){
int sum=0;
for (int ix = -1; ix <= 1; ix++){
for (int iy = -1; iy <= 1; iy++){
for (int iz = -1; iz <= 1; iz++){
if(contained(i+ix,j+iy,k+iz,fibreblock.getBoundingBox())) sum+=fibreblock.get(i+ix,j+iy,k+iz)[0];
}
}
}
if(sum==27) fibreblock.get(i,j,k)[1]=2;
else fibreblock.get(i,j,k)[1]=1;
}
}
}
}
}
int checkSolid(){
int Grid_X=fibreblock.getBoundingBox().getNx();
int Grid_Y=fibreblock.getBoundingBox().getNy();
int Grid_Z=fibreblock.getBoundingBox().getNz();
int sum=0;
#pragma omp parallel for reduction(+:sum)
for(int i = 0; i < Grid_X; i++){
for (int j = 0; j < Grid_Y; j++){
for(int k = 0; k < Grid_Z; k++){
sum+=fibreblock.get(i,j,k)[0];
}
}
}
return sum;
}
void exportFibreSetParameter(){
std::ofstream fout;
fout.open("FibreParameter.dat", std::ios::ate);
fout<<fibreblock.getNx()<<"\t"<<fibreblock.getNy()<<"\t"<<fibreblock.getNz()<<std::endl;
fout<<fs.getN()<<std::endl;
for ( int f = 1; f <= fs.getN(); f++){
fout<<fs[f].getBasePoint().x<<"\t"<<fs[f].getBasePoint().y<<"\t"<<fs[f].getBasePoint().z<<"\t"
<<fs[f].getDirection().getNx()<<"\t"<<fs[f].getDirection().getNy()<<"\t"<<fs[f].getDirection().getNz()<<"\t"
<<fs[f].getRadius()<<std::endl;
}
fout.close();
}
void exportRawData(){
int Grid_X=fibreblock.getNx();
int Grid_Y=fibreblock.getNy();
int Grid_Z=fibreblock.getNz();
std::ofstream fout;
fout.open("RawData.dat", std::ios::ate);
for (int i = 0; i < Grid_X; i++){
for (int j = 0; j < Grid_Y; j++){
for (int k = 0; k < Grid_Z; k++){
fout << fibreblock.get(i, j, k)[0] << "\t" << std::endl;
}
}
}
fout.close();
}
private:
BlockLattice3D<Descriptor> fibreblock;
AnisotropyDistribution dis;
RandomNumber rng;
FibreSet fs;
};
#endif //ANISOTROPYFIBRECASE_H |
trmm_x_coo_u_hi_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
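/* Reading of the kernel below: computes y := alpha*A*x + beta*y for a
   unit-diagonal, upper-triangular COO matrix A and row-major dense x and y.
   The first loop applies beta*y + alpha*x, i.e. the implicit unit diagonal;
   the second accumulates the strictly upper entries (col > row), assigning
   row cr to thread cr % num_threads so no two threads update the same row. */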
alphasparse_status_t
ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
printf("trmm_coo_u_hi_row\n");
ALPHA_INT m = mat->rows;
ALPHA_INT n = columns;
ALPHA_INT num_threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT r = 0; r < m; r++)
for (ALPHA_INT c = 0; c < columns; c++)
{
ALPHA_Number t1, t2;
alpha_mul(t1, y[r * ldy + c], beta);
alpha_mul(t2, alpha, x[index2(r, c, ldx)]);
alpha_add(y[r * ldy + c], t1, t2);
}
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
{
ALPHA_INT tid = alpha_get_thread_id();
for (ALPHA_INT ai = 0; ai < mat->nnz; ++ai)
{
ALPHA_INT cr = mat->row_indx[ai];
if (cr % num_threads != tid)
continue;
ALPHA_Number *Y = &y[index2(cr, 0, ldy)];
if (mat->col_indx[ai] > cr)
{
ALPHA_Number val;
alpha_mul(val, alpha, mat->values[ai]);
const ALPHA_Number *X = &x[index2(mat->col_indx[ai], 0, ldx)];
for (ALPHA_INT c = 0; c < n; ++c)
alpha_madde(Y[c], val, X[c]);
}
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
GB_binop__ne_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint8)
// A*D function (colscale): GB (_AxD__ne_uint8)
// D*A function (rowscale): GB (_DxB__ne_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint8)
// C=scalar+B GB (_bind1st__ne_uint8)
// C=scalar+B' GB (_bind1st_tran__ne_uint8)
// C=A+scalar GB (_bind2nd__ne_uint8)
// C=A'+scalar GB (_bind2nd_tran__ne_uint8)
// C type: bool
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_UINT8 || GxB_NO_NE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ne_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ne_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ne_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ne_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ne_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ne_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
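// Both bind kernels above share the same access pattern: GBB (Xb, p) tests
// whether entry p is present when the matrix is held in bitmap form (a NULL
// bitmap means every entry is present, as in the full case), and
// GBX (Xx, p, iso) reads the value of entry p, with iso == false meaning the
// matrix is not iso-valued. For the NE_UINT8 operator the result type is bool.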
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__ne_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c | #include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <SDL2/SDL.h>
#include <SDL2/SDL_timer.h>
#ifdef _OPENMP
#include <omp.h>
#else
#ifndef _ESCAPE_OPENMP
#define omp_get_num_threads() 1
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1 /* a serial build has exactly one thread */
#define omp_lock_t int
#define omp_set_lock(lck) 0
#define omp_unset_lock(lck) 0
#define omp_init_lock(lck) 0
#define _ESCAPE_OPENMP
#endif
#endif
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64
#endif
#include "AABB.h"
#include "Cells.h"
#include "Grid.h"
#define DO_LOGGING
//#define ISOTHERMAL_RESIZE
#define OUTPUT_CSV
Grid* mainGrid;
SDL_Texture* GridTex;
int NUM_BALLS = 500;
// acceleration due to gravity
double gravity_acceleration = 2;
// velocity loss per interaction, multiplicative
double restitution_coefficient = 1;
double max_ball_size = 10;
int CYCLES_PER_FRAME = 3;
int DO_DEBUG = 0;
int DO_UNEQUAL = 0;
int OPACITY = 255;
int MAX_FPS = 60;
const int XRES = 960;
const int YRES = 720;
typedef struct Ball
{
Collider* collide2d;
SDL_Texture* texture;
int isColliding;
double r;
double cx;
double cy;
double vx;
double vy;
double m;
} Ball;
void makeBall(double r, double cx, double cy, double vx, double vy, double m, Ball* addr, Ball* buff)
{
Box hitbox;
hitbox.X0 = cx - r;
hitbox.X1 = cx + r;
hitbox.Y0 = cy - r;
hitbox.Y1 = cy + r;
addr->collide2d = makeCollider(hitbox, mainGrid, buff);
addr->r = r;
addr->cx = cx;
addr->cy = cy;
addr->vx = vx;
addr->vy = vy;
addr->m = m;
}
int makeBallTex(Ball* ball, SDL_Renderer* ren)
{
// calculate the center pixel of the texture
int cx = ceil(ball->r);
int cy = cx;
// initialize the texture
ball->texture = SDL_CreateTexture(ren, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_TARGET, 2 * cx + 1, 2 * cy + 1);
SDL_SetTextureBlendMode(ball->texture, SDL_BLENDMODE_BLEND);
SDL_SetRenderTarget(ren, ball->texture);
// now set the texture's background to transparent
SDL_SetRenderDrawColor(ren, 0x00, 0x00, 0x00, 0x00);
SDL_RenderClear(ren);
// now fill in the circle; the base color is near-white (random blue channel) and is tinted per-ball at draw time via SDL_SetTextureColorMod
SDL_SetRenderDrawColor(ren, 0xff, 0xff, rand() % 256, 0xff);
for (int x = 0; x <= 2 * cx; x++)
{
for (int y = 0; y <= 2 * cy; y++)
{
// check if this point is within the circle
if ((x - cx) * (x - cx) + (y - cy) * (y - cy) < cx * cx)
{
SDL_RenderDrawPoint(ren, x, y);
}
}
}
SDL_SetRenderTarget(ren, NULL);
return 1;
}
/* Ball a will be mutated, and should not point to the buffer, b is in the buffer */
int ballCollide(Ball* a, const Ball* b)
{
double deltax = a->cx - b->cx;
double deltay = a->cy - b->cy;
double dsquared = deltax * deltax + deltay * deltay;
double rsum = a->r + b->r;
double deltavx = a->vx - b->vx;
double deltavy = a->vy - b->vy;
double vdotdisp = (deltax * deltavx + deltay * deltavy);
// only handle the collision if they are touching and moving towards each other (vdotdisp < 0)
if ((dsquared <= rsum * rsum) && vdotdisp < 0)
{
a->isColliding = 1;
double mul = (2 * b->m * vdotdisp) / ((a->m + b->m) * dsquared);
a->vx -= restitution_coefficient * mul * deltax;
a->vy -= restitution_coefficient * mul * deltay;
return 1;
}
else { return 0; }
}
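// The update above is the standard two-body elastic collision impulse for
// discs, restricted to the line of centers. With dx = x_a - x_b and
// dv = v_a - v_b, conservation of momentum and kinetic energy gives
//     v_a' = v_a - (2 m_b / (m_a + m_b)) * ((dv . dx) / |dx|^2) * dx,
// which is exactly mul * deltax and mul * deltay with mul as computed above;
// restitution_coefficient scales the impulse to model inelastic losses.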
uint32_t threadSafeXorShift(uint32_t *state)
{
*state ^= *state << 13;
*state ^= *state >> 17;
*state ^= *state << 5;
return *state;
}
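// This is Marsaglia's 32-bit xorshift generator (shift triple 13, 17, 5).
// It cycles through all 2^32 - 1 nonzero states, so the state must never be
// seeded with zero; the per-thread seeds chosen in main() are nonzero.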
/* pick a random double from a to b */
double randi(double a, double b)
{
return ((double)rand() / RAND_MAX) * (b - a) + a;
}
Ball* objects;
Ball* buf;
static Collider** ret;
Uint32 startTime = 0;
Uint32 endTime = 0;
uint32_t curr_update = 0;
static uint32_t htable_use;
int main(int argc, char* argv[])
{
// Argument parsing
if (argc > 3)
{
// the first argument determines the number of sprites,
// the second determines the gravity acceleration
NUM_BALLS = atoi(argv[1]);
if (NUM_BALLS < 0) { printf("NUM_BALLS cannot be negative, abort!\n"); return -1; }
gravity_acceleration = atof(argv[2]);
max_ball_size = atof(argv[3]);
// if there are more than the required arguments, scan for valid options
if (argc > 4)
{
for (int i = 4; i < argc; i++)
{
if (strcmp(argv[i], "-d") == 0) { DO_DEBUG = 1; }
else if (strcmp(argv[i], "-u") == 0) { DO_UNEQUAL = 1; }
else if (strncmp(argv[i], "-c", 2) == 0)
{
int t = atoi(argv[i] + 2);
if (t > 0) { CYCLES_PER_FRAME = t; }
}
else if (strncmp(argv[i], "-e", 2) == 0)
{
float f = atof(argv[i] + 2);
if (f >= 0 && f <= 1) { restitution_coefficient = f; }
}
else if (strncmp(argv[i], "-o", 2) == 0)
{
int f = atoi(argv[i] + 2);
if (f >= 0 && f <= 255) { OPACITY = f; }
}
}
}
}
// Display the help page
else
{
printf(
"gridCollisions help: \n\n"
"Usage: %s particles gravity radius [flags]\n\n"
" -d Super secret debug mode\n"
" -u Spawn particles unbalenced with more velocity on one side\n"
" -c[int] Physics cycles per frame, must be nonzero\n"
" -e[float] Coefficient of restitution / efficiency, must be on [0, 1]\n"
" -o[int] Clearing opacity, lower values create trails, must be between 0 and 255\n",
argv[0]);
exit(0);
}
//printf("max_ball_size: %f\n", max_ball_size);
//fflush(stdout);
if (SDL_Init(SDL_INIT_EVERYTHING)) { printf("error."); return 0; }
SDL_Window* win;
SDL_Renderer* ren;
SDL_CreateWindowAndRenderer(XRES, YRES, SDL_WINDOW_RESIZABLE, &win, &ren);
SDL_SetRenderDrawBlendMode(ren, SDL_BLENDMODE_BLEND);
int grid_cell_size = ceil(2.0 * max_ball_size);
while (XRES % grid_cell_size) grid_cell_size++;
//printf("grid_cell_size: %d\n", grid_cell_size);
//fflush(stdout);
// declare random seeds
srand(18698238);
static uint32_t randr_state;
#pragma omp threadprivate(randr_state)
// make the grid so grid coords equal screen coords
mainGrid = makeGrid(1 + (int)XRES / grid_cell_size, 1 + (int)YRES / grid_cell_size, (double)grid_cell_size);
// allocate the sprite memory, using calloc because debugging has made me paranoid
objects = calloc(NUM_BALLS, sizeof(Ball));
buf = calloc(NUM_BALLS, sizeof(Ball));
#pragma omp threadprivate(ret)
#pragma omp parallel
ret = calloc(NUM_BALLS, sizeof(Collider*));
// initialize the hash table as well as other threadprivates once per thread
static hashTable* hTable;
#pragma omp threadprivate(hTable)
#pragma omp threadprivate(htable_use)
#pragma omp parallel
{
randr_state = 0xe7425723 ^ omp_get_thread_num();
htable_use = 0;
#ifdef __unix__
if (posix_memalign((void**)&hTable, CACHE_LINE_SIZE, sizeof(hashTable))) exit(1);
if (posix_memalign((void**)&hTable->items, CACHE_LINE_SIZE, NUM_BALLS * sizeof(hashItem))) exit(1);
#else
hTable = calloc(1, sizeof(hashTable));
hTable->items = calloc(NUM_BALLS, sizeof(hashItem));
#endif
hTable->len = NUM_BALLS;
}
for (int i = 0; i < NUM_BALLS; i++)
{
// Initialize the sprites on the heap at random positions
// I should really position them so they don't overlap each other,
// but it'll be fine, right?
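// note: both randi() bounds below equal max_ball_size, so every ball is
// created with the same radius; widen the lower bound for mixed sizes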
double r = randi(max_ball_size * 1.0, max_ball_size);
makeBall(r, randi(r, XRES - r), randi(r, YRES - r), randi(-100, 100), randi(-100, 100), 3.14 * r * r, &objects[i], &buf[i]);
makeBallTex(&objects[i], ren);
if (DO_UNEQUAL && !(objects[i].cx < XRES / 2)) { objects[i].vx *= 3; objects[i].vy *= 3; }
insertToGrid(mainGrid, objects[i].collide2d, curr_update);
}
// Now make the grid texture for debugging
GridTex = SDL_CreateTexture(ren, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_TARGET, XRES, YRES);
SDL_SetRenderTarget(ren, GridTex);
SDL_SetRenderDrawColor(ren, 12, 32, 89, 255);
SDL_Rect bb;
bb.w = grid_cell_size;
bb.h = grid_cell_size;
for (bb.x = 0; bb.x < XRES; bb.x += grid_cell_size)
{
for (bb.y = 0; bb.y < YRES; bb.y += grid_cell_size)
{
SDL_RenderDrawRect(ren, &bb);
}
}
SDL_SetRenderTarget(ren, NULL);
// The minimum amount of time each cycle can take on average
double deltat = 1.0 / (double)(MAX_FPS * CYCLES_PER_FRAME);
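// Fixed-timestep scheme: each rendered frame advances the simulation by
// CYCLES_PER_FRAME physics steps of deltat seconds each, so one frame covers
// 1 / MAX_FPS seconds and the simulation tracks wall-clock time whenever the
// frame cap is met.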
SDL_Event e;
SDL_Rect winbox = { 0, 0, XRES, YRES };
SDL_SetWindowMaximumSize(win, XRES, YRES);
int window_width, window_height, last_window_width, last_window_height;
SDL_GetWindowSize(win, &window_width, &window_height);
double right_edge_velocity = 0;
double bottom_edge_velocity = 0;
double accumualted_impulse = 0;
// Setup window pos variables
int winposX0, winposY0, winposX1, winposY1, winposX2, winposY2;
SDL_GetWindowPosition(win, &winposX0, &winposY0);
// set them all to be the same
winposX1 = winposX0;
winposY1 = winposY0;
winposX2 = winposX1;
winposY2 = winposY1;
startTime = SDL_GetTicks();
#ifdef OUTPUT_CSV
puts("left energy (rms), right energy (rms), total energy, average kinetic energy, average pressure, window area, P * V");
#endif
while (1)
{
// Check to see if the user is trying to close the program, to prevent hanging
while (SDL_PollEvent(&e))
{
switch (e.type)
{
case SDL_QUIT:
exit(0);
case SDL_WINDOWEVENT:
{
if (e.window.event == SDL_WINDOWEVENT_RESIZED)
{
winbox.w = e.window.data1;
winbox.h = e.window.data2;
}
}
}
}
// clear the background
if ((curr_update + 1) % CYCLES_PER_FRAME == 0) { SDL_SetRenderDrawColor(ren, 0, 0, 0, OPACITY); SDL_RenderFillRect(ren, &winbox); }
// copy to the buffer, which acts as a static backup used when checking collisions,
// and is pointed to in each collider
memcpy(buf, objects, NUM_BALLS * sizeof(Ball));
// now, if debugging, and it is a draw cycle, draw the grid
if (DO_DEBUG && (curr_update + 1) % CYCLES_PER_FRAME == 0)
{
SDL_RenderCopy(ren, GridTex, NULL, NULL);
}
// loop to adjust velocities according to window movement
if ((curr_update + 1) % CYCLES_PER_FRAME == 0)
{
// record the window sizes
last_window_width = window_width;
last_window_height = window_height;
SDL_GetWindowSize(win, &window_width, &window_height);
#ifndef ISOTHERMAL_RESIZE
right_edge_velocity = -(double)(last_window_width - window_width) / (deltat * CYCLES_PER_FRAME);
bottom_edge_velocity = -(double)(last_window_height - window_height) / (deltat * CYCLES_PER_FRAME);
#endif
int tmpx, tmpy;
SDL_GetWindowPosition(win, &tmpx, &tmpy);
winposX2 = winposX1;
winposY2 = winposY1;
winposX1 = winposX0;
winposY1 = winposY0;
winposX0 = tmpx;
winposY0 = tmpy;
int winaccelX = (winposX2 - 2 * winposX1 + winposX0) / (deltat * deltat);
int winaccelY = (winposY2 - 2 * winposY1 + winposY0) / (deltat * deltat);
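// The window's acceleration is estimated with a second central difference
// over the last three sampled positions: a ~= (x2 - 2*x1 + x0) / h^2. The
// samples are one frame (CYCLES_PER_FRAME * deltat seconds) apart, but the
// code divides by deltat^2 here and applies winaccel * deltat /
// CYCLES_PER_FRAME once per frame below, which works out to the same total
// velocity change of (x2 - 2*x1 + x0) / (CYCLES_PER_FRAME * deltat) per frame.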
if (curr_update > 5 * CYCLES_PER_FRAME)
{
for (int i = 0; i < NUM_BALLS; i++)
{
objects[i].vx -= winaccelX * deltat / CYCLES_PER_FRAME;
objects[i].vy -= winaccelY * deltat / CYCLES_PER_FRAME;
}
}
}
// collision loop
// accumualted_impulse is shared across threads; reduce it to avoid a data race
#pragma omp parallel for reduction(+ : accumualted_impulse)
for (int i = 0; i < NUM_BALLS; i++)
{
objects[i].isColliding = 0;
// reset the hash table to be effectively empty
hTable->num = 0;
htable_use++;
// we don't want our search to return this current collider,
// so add it to the hash table, which is used inside the
// queryBox function to avoid redundant results
insertHashItem(hTable, (intptr_t)(buf + i), htable_use);
// search for the other colliders (in buf) which are near this
// collider's hitbox, and put them in ret
int nresults = queryBox(mainGrid, objects[i].collide2d->hitbox, ret, hTable, NUM_BALLS, curr_update, htable_use, threadSafeXorShift(&randr_state) % 2, 0);
// now, handle the physics for each collision by looping over returned values
int r = threadSafeXorShift(&randr_state) % 2;
for (int j = 0; j < nresults; j++)
{
// set object[i]'s velocity according to elastic collision
if (r) ballCollide(&objects[i], ret[j]->sprite);
else ballCollide(&objects[i], ret[nresults - j - 1]->sprite);
#ifndef _OPENMP
if (DO_DEBUG && (curr_update + 1) % CYCLES_PER_FRAME == 0)
{
SDL_SetRenderDrawColor(ren, 255, 255, 255, 255);
SDL_RenderDrawLine(ren, objects[i].cx, objects[i].cy, ((Ball*)ret[j]->sprite)->cx, ((Ball*)ret[j]->sprite)->cy);
}
#endif
}
// bounce off the walls
Box hit = objects[i].collide2d->hitbox;
if (hit.X0 < 0 && objects[i].vx < 0)
{
accumualted_impulse += fabs(objects[i].m * objects[i].vx);
objects[i].vx = objects[i].vx * -restitution_coefficient; //- (1 + restitution_coefficient) * right_edge_velocity;
objects[i].vy *= restitution_coefficient;
accumualted_impulse += fabs(objects[i].m * objects[i].vx);
}
if (hit.X1 > window_width && objects[i].vx - right_edge_velocity > 0)
{
accumualted_impulse += fabs(objects[i].m * objects[i].vx);
objects[i].vx = objects[i].vx * -restitution_coefficient + (1 + restitution_coefficient) * right_edge_velocity;
objects[i].vy *= restitution_coefficient;
accumualted_impulse += fabs(objects[i].m * objects[i].vx);
}
if (hit.Y0 < 0 && objects[i].vy < 0)
{
accumualted_impulse += fabs(objects[i].m * objects[i].vy);
objects[i].vy = objects[i].vy * -restitution_coefficient; //- (1 + restitution_coefficient) * bottom_edge_velocity;
objects[i].vx *= restitution_coefficient;
accumualted_impulse += fabs(objects[i].m * objects[i].vy);
}
if (hit.Y1 > window_height && objects[i].vy - bottom_edge_velocity > 0)
{
accumualted_impulse += fabs(objects[i].m * objects[i].vy);
objects[i].vy = objects[i].vy * -restitution_coefficient + (1 + restitution_coefficient) * bottom_edge_velocity;
objects[i].vx *= restitution_coefficient; objects[i].isColliding = 1;
accumualted_impulse += fabs(objects[i].m * objects[i].vy);
}
// do gravity if not colliding
if (!objects[i].isColliding) { objects[i].vy += gravity_acceleration * deltat; }
}
curr_update++;
#pragma omp parallel for
for (int i = 0; i < NUM_BALLS; i++)
{
// calculate the position step
double dx = objects[i].vx * deltat;
double dy = objects[i].vy * deltat;
// update the hitbox
objects[i].collide2d->hitbox.X0 += dx;
objects[i].collide2d->hitbox.Y0 += dy;
objects[i].collide2d->hitbox.X1 += dx;
objects[i].collide2d->hitbox.Y1 += dy;
// update the circle center
objects[i].cx += dx;
objects[i].cy += dy;
// re-insert this sprite into the grid
insertToGrid(mainGrid, objects[i].collide2d, curr_update);
}
if (curr_update % CYCLES_PER_FRAME == 0 )
{
for (int i = 0; i < NUM_BALLS; i++)
{
// part of the purpose of this demo is to visualize
// energy transfer using colors, where more red means
// more energy, and more green means less energy
double energy = 0.0001 * objects[i].m / (max_ball_size * max_ball_size) * (objects[i].vx * objects[i].vx + objects[i].vy * objects[i].vy);
// using a squishification function to limit sig to a range of 0 - 1
float sig = 1 - 1 / (1 + energy + energy * energy / 2);
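// 1 + e + e^2/2 is the degree-2 Taylor polynomial of exp(e), so
// sig = 1 - 1/(1 + e + e^2/2) ~= 1 - exp(-energy): a cheap saturating map
// from [0, inf) onto [0, 1) without calling exp()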
// now adjust the hue according to sig
float colormin = 46;
float colormax = 234;
int r = (int)((sig > 0.5) ? colormax : colormin + (colormax - colormin) * 2 * sig);
int g = (int)((sig < 0.5) ? colormax : colormin - (colormax - colormin) * 2 * (sig - 1));
// draw the ball
SDL_SetTextureColorMod(objects[i].texture, r, g, 36);
SDL_Rect boundbox = { round(objects[i].cx - objects[i].r) - 1, round(objects[i].cy - objects[i].r) - 1, 2 * ceil(objects[i].r) + 2, 2 * ceil(objects[i].r) + 2 };
SDL_RenderCopy(ren, objects[i].texture, NULL, &boundbox);
}
}
// clear the hash table to make me feel better
memset(hTable->items, 0, hTable->len * sizeof(hashItem));
if (curr_update % CYCLES_PER_FRAME == 0)
{
SDL_RenderPresent(ren);
endTime = SDL_GetTicks();
double dt = endTime - startTime;
// limit framerate
int milis = 1000 / MAX_FPS;
if (dt < milis) { SDL_Delay(milis - dt); }
if (DO_DEBUG && curr_update % (CYCLES_PER_FRAME * MAX_FPS) == 0) printf("\x1b[2JFPS: %f\n", (dt < milis ? MAX_FPS : 1000.0 / (double)dt));
startTime = SDL_GetTicks();
}
#ifdef DO_LOGGING
if ((curr_update % (CYCLES_PER_FRAME * MAX_FPS)) == 0)
{
double left_energy = 0;
int left_count = 0;
double right_energy = 0;
int right_count = 0;
double total_energy = 0;
for (int i = 0; i < NUM_BALLS; i++)
{
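// per-particle "energy": m * v^2 for the kinetic part plus a
// position-proportional term standing in for potential energy; the
// constant factors (1/2, gravity) are dropped since only relative
// values are compared between the two halves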
double particle_energy = objects[i].m * (objects[i].vx * objects[i].vx + objects[i].vy * objects[i].vy + objects[i].cy);
total_energy += particle_energy;
if (objects[i].cx < window_width / 2)
{
left_energy += particle_energy;
left_count++;
}
else
{
right_energy += particle_energy;
right_count++;
}
}
/*
double left_chi = 0;
double left_mean = left_energy / left_count;
double right_chi = 0;
double right_mean = right_energy / right_count;
for (int i = 0; i < NUM_BALLS; i++)
{
double particle_energy = objects[i].m * (objects[i].vx * objects[i].vx + objects[i].vy * objects[i].vy);
if (objects[i].cx < window_width / 2)
{
left_chi += (left_mean - particle_energy) * (left_mean - particle_energy);
}
else
{
right_chi += (right_mean - particle_energy) * (right_mean - particle_energy);
}
}
*/
// delta t == 1
double average_pressure = accumualted_impulse / (2 * (window_width + window_height));
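// 2D pressure analog: the momentum transferred to the walls over the
// one-second logging window, divided by the container perimeter
// 2 * (width + height), gives force per unit length; multiplied by the
// window area it yields the P * V column, an ideal-gas sanity check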
accumualted_impulse = 0;
#ifndef OUTPUT_CSV
puts("--------------------------------------");
printf("left energy (rms): %f\n", left_energy / left_count);
//printf("left chi: %f\n", sqrt(left_chi / left_count));
printf("right energy (rms): %f\n", right_energy / right_count);
//printf("right chi: %f\n", sqrt(right_chi / right_count));
printf("total energy: %f\n", total_energy);
printf("average kinetic energy (rms): %f\n", (left_energy + right_energy) / NUM_BALLS);
printf("average pressure: %f\n", average_pressure);
printf("window area: %d\n", window_width * window_height);
printf("P * V: %f\n", average_pressure * window_width * window_height);
#else
printf("%f, ", left_energy / left_count);
//printf("%f, ", sqrt(left_chi / left_count));
printf("%f, ", right_energy / right_count);
//printf("%f, ", sqrt(right_chi / left_count));
printf("%f, ", left_energy + right_energy);
printf("%f, ", (left_energy + right_energy) / NUM_BALLS);
printf("%f, ", average_pressure);
printf("%d, ", window_width * window_height);
printf("%f\n", average_pressure * window_width * window_height);
#endif
}
#endif
}
return 0;
}
|
decl2.c | /* Process declarations and variables for C++ compiler.
Copyright (C) 1988-2018 Free Software Foundation, Inc.
Hacked by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Process declarations and symbol lookup for C++ front end.
Also constructs types; the standard scalar types at initialization,
and structure, union, array and enum types when they are declared. */
/* ??? not all decl nodes are given the most useful possible
line numbers. For example, the CONST_DECLs for enum values. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "target.h"
#include "cp-tree.h"
#include "c-family/c-common.h"
#include "timevar.h"
#include "stringpool.h"
#include "cgraph.h"
#include "varasm.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "decl.h"
#include "toplev.h"
#include "c-family/c-objc.h"
#include "c-family/c-pragma.h"
#include "dumpfile.h"
#include "intl.h"
#include "c-family/c-ada-spec.h"
#include "asan.h"
/* Id for dumping the raw trees. */
int raw_dump_id;
extern cpp_reader *parse_in;
/* This structure contains information about the initializations
and/or destructions required for a particular priority level. */
typedef struct priority_info_s {
/* Nonzero if there have been any initializations at this priority
throughout the translation unit. */
int initializations_p;
/* Nonzero if there have been any destructions at this priority
throughout the translation unit. */
int destructions_p;
} *priority_info;
static void mark_vtable_entries (tree);
static bool maybe_emit_vtables (tree);
static tree start_objects (int, int);
static void finish_objects (int, int, tree);
static tree start_static_storage_duration_function (unsigned);
static void finish_static_storage_duration_function (tree);
static priority_info get_priority_info (int);
static void do_static_initialization_or_destruction (tree, bool);
static void one_static_initialization_or_destruction (tree, tree, bool);
static void generate_ctor_or_dtor_function (bool, int, location_t *);
static int generate_ctor_and_dtor_functions_for_priority (splay_tree_node,
void *);
static tree prune_vars_needing_no_initialization (tree *);
static void write_out_vars (tree);
static void import_export_class (tree);
static tree get_guard_bits (tree);
static void determine_visibility_from_class (tree, tree);
static bool determine_hidden_inline (tree);
static void maybe_instantiate_decl (tree);
/* A list of static class variables. This is needed, because a
static class variable can be declared inside the class without
an initializer, and then initialized, statically, outside the class. */
static GTY(()) vec<tree, va_gc> *pending_statics;
/* A list of functions which were declared inline, but which we
may need to emit outline anyway. */
static GTY(()) vec<tree, va_gc> *deferred_fns;
/* A list of decls that use types with no linkage, which we need to make
sure are defined. */
static GTY(()) vec<tree, va_gc> *no_linkage_decls;
/* A vector of alternating decls and identifiers, where the latter
is to be an alias for the former if the former is defined. */
static GTY(()) vec<tree, va_gc> *mangling_aliases;
/* hash traits for declarations. Hashes single decls via
DECL_ASSEMBLER_NAME_RAW. */
struct mangled_decl_hash : ggc_remove <tree>
{
typedef tree value_type; /* A DECL. */
typedef tree compare_type; /* An identifier. */
static hashval_t hash (const value_type decl)
{
return IDENTIFIER_HASH_VALUE (DECL_ASSEMBLER_NAME_RAW (decl));
}
static bool equal (const value_type existing, compare_type candidate)
{
tree name = DECL_ASSEMBLER_NAME_RAW (existing);
return candidate == name;
}
static inline void mark_empty (value_type &p) {p = NULL_TREE;}
static inline bool is_empty (value_type p) {return !p;}
static bool is_deleted (value_type e)
{
return e == reinterpret_cast <value_type> (1);
}
static void mark_deleted (value_type &e)
{
e = reinterpret_cast <value_type> (1);
}
};
/* A hash table of decls keyed by mangled name. Used to figure out if
we need compatibility aliases. */
static GTY(()) hash_table<mangled_decl_hash> *mangled_decls;
/* Nonzero if we're done parsing and into end-of-file activities. */
int at_eof;
/* True if note_mangling_alias should enqueue mangling aliases for
later generation, rather than emitting them right away. */
bool defer_mangling_aliases = true;
/* Return a member function type (a METHOD_TYPE), given FNTYPE (a
FUNCTION_TYPE), CTYPE (class type), and QUALS (the cv-qualifiers
that apply to the function). */
tree
build_memfn_type (tree fntype, tree ctype, cp_cv_quals quals,
cp_ref_qualifier rqual)
{
tree raises;
tree attrs;
int type_quals;
bool late_return_type_p;
if (fntype == error_mark_node || ctype == error_mark_node)
return error_mark_node;
gcc_assert (TREE_CODE (fntype) == FUNCTION_TYPE
|| TREE_CODE (fntype) == METHOD_TYPE);
type_quals = quals & ~TYPE_QUAL_RESTRICT;
ctype = cp_build_qualified_type (ctype, type_quals);
raises = TYPE_RAISES_EXCEPTIONS (fntype);
attrs = TYPE_ATTRIBUTES (fntype);
late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (fntype);
fntype = build_method_type_directly (ctype, TREE_TYPE (fntype),
(TREE_CODE (fntype) == METHOD_TYPE
? TREE_CHAIN (TYPE_ARG_TYPES (fntype))
: TYPE_ARG_TYPES (fntype)));
if (attrs)
fntype = cp_build_type_attribute_variant (fntype, attrs);
if (rqual)
fntype = build_ref_qualified_type (fntype, rqual);
if (raises)
fntype = build_exception_variant (fntype, raises);
if (late_return_type_p)
TYPE_HAS_LATE_RETURN_TYPE (fntype) = 1;
return fntype;
}
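/* Illustrative example (not from the sources): given FNTYPE for `int (int)',
   CTYPE `C', and QUALS `const', the result is the METHOD_TYPE of
   `int C::f (int) const' -- the cv-qualifiers are applied to the class type
   of the implicit object parameter rather than to the function type.  */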
/* Return a variant of FNTYPE, a FUNCTION_TYPE or METHOD_TYPE, with its
return type changed to NEW_RET. */
tree
change_return_type (tree new_ret, tree fntype)
{
tree newtype;
tree args = TYPE_ARG_TYPES (fntype);
tree raises = TYPE_RAISES_EXCEPTIONS (fntype);
tree attrs = TYPE_ATTRIBUTES (fntype);
bool late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (fntype);
if (new_ret == error_mark_node)
return fntype;
if (same_type_p (new_ret, TREE_TYPE (fntype)))
return fntype;
if (TREE_CODE (fntype) == FUNCTION_TYPE)
{
newtype = build_function_type (new_ret, args);
newtype = apply_memfn_quals (newtype,
type_memfn_quals (fntype),
type_memfn_rqual (fntype));
}
else
newtype = build_method_type_directly
(class_of_this_parm (fntype), new_ret, TREE_CHAIN (args));
if (FUNCTION_REF_QUALIFIED (fntype))
newtype = build_ref_qualified_type (newtype, type_memfn_rqual (fntype));
if (raises)
newtype = build_exception_variant (newtype, raises);
if (attrs)
newtype = cp_build_type_attribute_variant (newtype, attrs);
if (late_return_type_p)
TYPE_HAS_LATE_RETURN_TYPE (newtype) = 1;
return newtype;
}
/* Build a PARM_DECL of FN with NAME and TYPE, and set DECL_ARG_TYPE
appropriately. */
tree
cp_build_parm_decl (tree fn, tree name, tree type)
{
tree parm = build_decl (input_location,
PARM_DECL, name, type);
DECL_CONTEXT (parm) = fn;
/* DECL_ARG_TYPE is only used by the back end and the back end never
sees templates. */
if (!processing_template_decl)
DECL_ARG_TYPE (parm) = type_passed_as (type);
return parm;
}
/* Returns a PARM_DECL of FN for a parameter of the indicated TYPE, with the
indicated NAME. */
tree
build_artificial_parm (tree fn, tree name, tree type)
{
tree parm = cp_build_parm_decl (fn, name, type);
DECL_ARTIFICIAL (parm) = 1;
/* All our artificial parms are implicitly `const'; they cannot be
assigned to. */
TREE_READONLY (parm) = 1;
return parm;
}
/* Constructors for types with virtual baseclasses need an "in-charge" flag
saying whether this constructor is responsible for initialization of
virtual baseclasses or not. All destructors also need this "in-charge"
flag, which additionally determines whether or not the destructor should
free the memory for the object.
This function adds the "in-charge" flag to member function FN if
appropriate. It is called from grokclassfn and tsubst.
FN must be either a constructor or destructor.
The in-charge flag follows the 'this' parameter, and is followed by the
VTT parm (if any), then the user-written parms. */
void
maybe_retrofit_in_chrg (tree fn)
{
tree basetype, arg_types, parms, parm, fntype;
/* If we've already added the in-charge parameter, don't do it again. */
if (DECL_HAS_IN_CHARGE_PARM_P (fn))
return;
/* When processing templates we can't know, in general, whether or
not we're going to have virtual baseclasses. */
if (processing_template_decl)
return;
/* We don't need an in-charge parameter for constructors that don't
have virtual bases. */
if (DECL_CONSTRUCTOR_P (fn)
&& !CLASSTYPE_VBASECLASSES (DECL_CONTEXT (fn)))
return;
arg_types = TYPE_ARG_TYPES (TREE_TYPE (fn));
basetype = TREE_TYPE (TREE_VALUE (arg_types));
arg_types = TREE_CHAIN (arg_types);
parms = DECL_CHAIN (DECL_ARGUMENTS (fn));
/* If this is a subobject constructor or destructor, our caller will
pass us a pointer to our VTT. */
if (CLASSTYPE_VBASECLASSES (DECL_CONTEXT (fn)))
{
parm = build_artificial_parm (fn, vtt_parm_identifier, vtt_parm_type);
/* First add it to DECL_ARGUMENTS between 'this' and the real args... */
DECL_CHAIN (parm) = parms;
parms = parm;
/* ...and then to TYPE_ARG_TYPES. */
arg_types = hash_tree_chain (vtt_parm_type, arg_types);
DECL_HAS_VTT_PARM_P (fn) = 1;
}
/* Then add the in-charge parm (before the VTT parm). */
parm = build_artificial_parm (fn, in_charge_identifier, integer_type_node);
DECL_CHAIN (parm) = parms;
parms = parm;
arg_types = hash_tree_chain (integer_type_node, arg_types);
/* Insert our new parameter(s) into the list. */
DECL_CHAIN (DECL_ARGUMENTS (fn)) = parms;
/* And rebuild the function type. */
fntype = build_method_type_directly (basetype, TREE_TYPE (TREE_TYPE (fn)),
arg_types);
if (TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fn)))
fntype = build_exception_variant (fntype,
TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fn)));
if (TYPE_ATTRIBUTES (TREE_TYPE (fn)))
fntype = (cp_build_type_attribute_variant
(fntype, TYPE_ATTRIBUTES (TREE_TYPE (fn))));
TREE_TYPE (fn) = fntype;
/* Now we've got the in-charge parameter. */
DECL_HAS_IN_CHARGE_PARM_P (fn) = 1;
}
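/* After retrofitting, DECL_ARGUMENTS of a constructor for a class with
   virtual bases is laid out as: `this', the in-charge flag, the VTT
   pointer, then the user-written parameters, matching the ordering
   described in the comment above.  */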
/* Classes overload their constituent function names automatically.
When a function name is declared in a record structure,
its name is changed to its overloaded name. Since names for
constructors and destructors can conflict, we place a leading
'$' for destructors.
CNAME is the name of the class we are grokking for.
FUNCTION is a FUNCTION_DECL. It was created by `grokdeclarator'.
FLAGS contains bits saying what's special about today's
arguments. DTOR_FLAG == DESTRUCTOR.
If FUNCTION is a destructor, then we must add the `auto-delete' field
as a second parameter. There is some hair associated with the fact
that we must "declare" this variable in the manner consistent with the
way the rest of the arguments were declared.
QUALS are the qualifiers for the this pointer. */
void
grokclassfn (tree ctype, tree function, enum overload_flags flags)
{
tree fn_name = DECL_NAME (function);
/* Even within an `extern "C"' block, members get C++ linkage. See
[dcl.link] for details. */
SET_DECL_LANGUAGE (function, lang_cplusplus);
if (fn_name == NULL_TREE)
{
error ("name missing for member function");
fn_name = get_identifier ("<anonymous>");
DECL_NAME (function) = fn_name;
}
DECL_CONTEXT (function) = ctype;
if (flags == DTOR_FLAG)
DECL_CXX_DESTRUCTOR_P (function) = 1;
if (flags == DTOR_FLAG || DECL_CONSTRUCTOR_P (function))
maybe_retrofit_in_chrg (function);
}
/* Create an ARRAY_REF, checking for the user doing things backwards
along the way. DECLTYPE_P is for N3276, as in the parser. */
tree
grok_array_decl (location_t loc, tree array_expr, tree index_exp,
bool decltype_p)
{
tree type;
tree expr;
tree orig_array_expr = array_expr;
tree orig_index_exp = index_exp;
tree overload = NULL_TREE;
if (error_operand_p (array_expr) || error_operand_p (index_exp))
return error_mark_node;
if (processing_template_decl)
{
if (type_dependent_expression_p (array_expr)
|| type_dependent_expression_p (index_exp))
return build_min_nt_loc (loc, ARRAY_REF, array_expr, index_exp,
NULL_TREE, NULL_TREE);
array_expr = build_non_dependent_expr (array_expr);
index_exp = build_non_dependent_expr (index_exp);
}
type = TREE_TYPE (array_expr);
gcc_assert (type);
type = non_reference (type);
/* If they have an `operator[]', use that. */
if (MAYBE_CLASS_TYPE_P (type) || MAYBE_CLASS_TYPE_P (TREE_TYPE (index_exp)))
{
tsubst_flags_t complain = tf_warning_or_error;
if (decltype_p)
complain |= tf_decltype;
expr = build_new_op (loc, ARRAY_REF, LOOKUP_NORMAL, array_expr,
index_exp, NULL_TREE, &overload, complain);
}
else
{
tree p1, p2, i1, i2;
/* Otherwise, create an ARRAY_REF for a pointer or array type.
It is a little-known fact that, if `a' is an array and `i' is
an int, you can write `i[a]', which means the same thing as
`a[i]'. */
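/* That is, for `int a[4];' the expression `1[a]' is rewritten as
   *(a + 1), the same object as a[1]; the code below therefore accepts
   the pointer and integer operands in either order.  */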
if (TREE_CODE (type) == ARRAY_TYPE || VECTOR_TYPE_P (type))
p1 = array_expr;
else
p1 = build_expr_type_conversion (WANT_POINTER, array_expr, false);
if (TREE_CODE (TREE_TYPE (index_exp)) == ARRAY_TYPE)
p2 = index_exp;
else
p2 = build_expr_type_conversion (WANT_POINTER, index_exp, false);
i1 = build_expr_type_conversion (WANT_INT | WANT_ENUM, array_expr,
false);
i2 = build_expr_type_conversion (WANT_INT | WANT_ENUM, index_exp,
false);
if ((p1 && i2) && (i1 && p2))
error ("ambiguous conversion for array subscript");
if (p1 && i2)
array_expr = p1, index_exp = i2;
else if (i1 && p2)
array_expr = p2, index_exp = i1;
else
{
error ("invalid types %<%T[%T]%> for array subscript",
type, TREE_TYPE (index_exp));
return error_mark_node;
}
if (array_expr == error_mark_node || index_exp == error_mark_node)
error ("ambiguous conversion for array subscript");
if (TREE_CODE (TREE_TYPE (array_expr)) == POINTER_TYPE)
array_expr = mark_rvalue_use (array_expr);
else
array_expr = mark_lvalue_use_nonread (array_expr);
index_exp = mark_rvalue_use (index_exp);
expr = build_array_ref (input_location, array_expr, index_exp);
}
if (processing_template_decl && expr != error_mark_node)
{
if (overload != NULL_TREE)
return (build_min_non_dep_op_overload
(ARRAY_REF, expr, overload, orig_array_expr, orig_index_exp));
return build_min_non_dep (ARRAY_REF, expr, orig_array_expr, orig_index_exp,
NULL_TREE, NULL_TREE);
}
return expr;
}
/* Given the cast expression EXP, check its validity. Either return
an error_mark_node if there was an unavoidable error, return a cast to
void for trying to delete a pointer w/ the value 0, or return the
call to delete. If DOING_VEC is true, we handle things differently
for doing an array delete.
Implements ARM $5.3.4. This is called from the parser. */
tree
delete_sanity (tree exp, tree size, bool doing_vec, int use_global_delete,
tsubst_flags_t complain)
{
tree t, type;
if (exp == error_mark_node)
return exp;
if (processing_template_decl)
{
t = build_min (DELETE_EXPR, void_type_node, exp, size);
DELETE_EXPR_USE_GLOBAL (t) = use_global_delete;
DELETE_EXPR_USE_VEC (t) = doing_vec;
TREE_SIDE_EFFECTS (t) = 1;
return t;
}
/* An array can't have been allocated by new, so complain. */
if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE)
warning (0, "deleting array %q#E", exp);
t = build_expr_type_conversion (WANT_POINTER, exp, true);
if (t == NULL_TREE || t == error_mark_node)
{
error ("type %q#T argument given to %<delete%>, expected pointer",
TREE_TYPE (exp));
return error_mark_node;
}
type = TREE_TYPE (t);
/* As of Valley Forge, you can delete a pointer to const. */
/* You can't delete functions. */
if (TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE)
{
error ("cannot delete a function. Only pointer-to-objects are "
"valid arguments to %<delete%>");
return error_mark_node;
}
/* Deleting ptr to void is undefined behavior [expr.delete/3]. */
if (VOID_TYPE_P (TREE_TYPE (type)))
{
warning (OPT_Wdelete_incomplete, "deleting %qT is undefined", type);
doing_vec = 0;
}
/* Deleting a pointer with the value zero is valid and has no effect. */
if (integer_zerop (t))
return build1 (NOP_EXPR, void_type_node, t);
if (doing_vec)
return build_vec_delete (t, /*maxindex=*/NULL_TREE,
sfk_deleting_destructor,
use_global_delete, complain);
else
return build_delete (type, t, sfk_deleting_destructor,
LOOKUP_NORMAL, use_global_delete,
complain);
}
/* Report an error if the indicated template declaration is not the
sort of thing that should be a member template. */
void
check_member_template (tree tmpl)
{
tree decl;
gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL);
decl = DECL_TEMPLATE_RESULT (tmpl);
if (TREE_CODE (decl) == FUNCTION_DECL
|| DECL_ALIAS_TEMPLATE_P (tmpl)
|| (TREE_CODE (decl) == TYPE_DECL
&& MAYBE_CLASS_TYPE_P (TREE_TYPE (decl))))
{
/* The parser rejects template declarations in local classes
(with the exception of generic lambdas). */
gcc_assert (!current_function_decl || LAMBDA_FUNCTION_P (decl));
/* The parser rejects any use of virtual in a function template. */
gcc_assert (!(TREE_CODE (decl) == FUNCTION_DECL
&& DECL_VIRTUAL_P (decl)));
/* The debug-information generating code doesn't know what to do
with member templates. */
DECL_IGNORED_P (tmpl) = 1;
}
else if (variable_template_p (tmpl))
/* OK */;
else
error ("template declaration of %q#D", decl);
}
/* Sanity check: report error if this function FUNCTION is not
really a member of the class (CTYPE) it is supposed to belong to.
TEMPLATE_PARMS is used to specify the template parameters of a member
template passed as FUNCTION_DECL. If the member template is passed as a
TEMPLATE_DECL, it can be NULL since the parameters can be extracted
from the declaration. If the function is not a function template, it
must be NULL.
It returns the original declaration for the function, NULL_TREE if
no declaration was found, error_mark_node if an error was emitted. */
tree
check_classfn (tree ctype, tree function, tree template_parms)
{
if (DECL_USE_TEMPLATE (function)
&& !(TREE_CODE (function) == TEMPLATE_DECL
&& DECL_TEMPLATE_SPECIALIZATION (function))
&& DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (function)))
/* Since this is a specialization of a member template,
we're not going to find the declaration in the class.
For example, in:
struct S { template <typename T> void f(T); };
template <> void S::f(int);
we're not going to find `S::f(int)', but there's no
reason we should, either. We let our callers know we didn't
find the method, but we don't complain. */
return NULL_TREE;
/* Basic sanity check: for a template function, the template parameters
either were not passed, or they are the same as DECL_TEMPLATE_PARMS. */
if (TREE_CODE (function) == TEMPLATE_DECL)
{
if (template_parms
&& !comp_template_parms (template_parms,
DECL_TEMPLATE_PARMS (function)))
{
error ("template parameter lists provided don%'t match the "
"template parameters of %qD", function);
return error_mark_node;
}
template_parms = DECL_TEMPLATE_PARMS (function);
}
/* OK, is this a definition of a member template? */
bool is_template = (template_parms != NULL_TREE);
/* [temp.mem]
A destructor shall not be a member template. */
if (DECL_DESTRUCTOR_P (function) && is_template)
{
error ("destructor %qD declared as member template", function);
return error_mark_node;
}
/* We must enter the scope here, because conversion operators are
named by target type, and type equivalence relies on typenames
resolving within the scope of CTYPE. */
tree pushed_scope = push_scope (ctype);
tree matched = NULL_TREE;
tree fns = get_class_binding (ctype, DECL_NAME (function));
for (ovl_iterator iter (fns); !matched && iter; ++iter)
{
tree fndecl = *iter;
/* A member template definition only matches a member template
declaration. */
if (is_template != (TREE_CODE (fndecl) == TEMPLATE_DECL))
continue;
if (!DECL_DECLARES_FUNCTION_P (fndecl))
continue;
tree p1 = TYPE_ARG_TYPES (TREE_TYPE (function));
tree p2 = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
/* We cannot simply call decls_match because this doesn't work
for static member functions that are pretending to be
methods, and because the name may have been changed by
asm("new_name"). */
/* Get rid of the this parameter on functions that become
static. */
if (DECL_STATIC_FUNCTION_P (fndecl)
&& TREE_CODE (TREE_TYPE (function)) == METHOD_TYPE)
p1 = TREE_CHAIN (p1);
/* ref-qualifier or absence of same must match. */
if (type_memfn_rqual (TREE_TYPE (function))
!= type_memfn_rqual (TREE_TYPE (fndecl)))
continue;
// Include constraints in the match.
tree c1 = get_constraints (function);
tree c2 = get_constraints (fndecl);
/* While finding a match, same types and params are not enough
if the function is versioned. Also check version ("target")
attributes. */
if (same_type_p (TREE_TYPE (TREE_TYPE (function)),
TREE_TYPE (TREE_TYPE (fndecl)))
&& compparms (p1, p2)
&& !targetm.target_option.function_versions (function, fndecl)
&& (!is_template
|| comp_template_parms (template_parms,
DECL_TEMPLATE_PARMS (fndecl)))
&& equivalent_constraints (c1, c2)
&& (DECL_TEMPLATE_SPECIALIZATION (function)
== DECL_TEMPLATE_SPECIALIZATION (fndecl))
&& (!DECL_TEMPLATE_SPECIALIZATION (function)
|| (DECL_TI_TEMPLATE (function) == DECL_TI_TEMPLATE (fndecl))))
matched = fndecl;
}
if (!matched)
{
if (!COMPLETE_TYPE_P (ctype))
cxx_incomplete_type_error (function, ctype);
else
{
if (DECL_CONV_FN_P (function))
fns = get_class_binding (ctype, conv_op_identifier);
error_at (DECL_SOURCE_LOCATION (function),
"no declaration matches %q#D", function);
if (fns)
print_candidates (fns);
else if (DECL_CONV_FN_P (function))
inform (DECL_SOURCE_LOCATION (function),
"no conversion operators declared");
else
inform (DECL_SOURCE_LOCATION (function),
"no functions named %qD", function);
inform (DECL_SOURCE_LOCATION (TYPE_NAME (ctype)),
"%#qT defined here", ctype);
}
matched = error_mark_node;
}
if (pushed_scope)
pop_scope (pushed_scope);
return matched;
}
/* DECL is a function with vague linkage. Remember it so that at the
end of the translation unit we can decide whether or not to emit
it. */
void
note_vague_linkage_fn (tree decl)
{
if (processing_template_decl)
return;
DECL_DEFER_OUTPUT (decl) = 1;
vec_safe_push (deferred_fns, decl);
}
/* As above, but for variable template instantiations. */
void
note_variable_template_instantiation (tree decl)
{
vec_safe_push (pending_statics, decl);
}
/* We have just processed the DECL, which is a static data member.
The other parameters are as for cp_finish_decl. */
void
finish_static_data_member_decl (tree decl,
tree init, bool init_const_expr_p,
tree asmspec_tree,
int flags)
{
DECL_CONTEXT (decl) = current_class_type;
/* We cannot call pushdecl here, because that would fill in the
TREE_CHAIN of our decl. Instead, we modify cp_finish_decl to do
the right thing, namely, to put this decl out straight away. */
if (! processing_template_decl)
vec_safe_push (pending_statics, decl);
if (LOCAL_CLASS_P (current_class_type)
/* We already complained about the template definition. */
&& !DECL_TEMPLATE_INSTANTIATION (decl))
permerror (input_location, "local class %q#T shall not have static data member %q#D",
current_class_type, decl);
else
for (tree t = current_class_type; TYPE_P (t);
t = CP_TYPE_CONTEXT (t))
if (TYPE_UNNAMED_P (t))
{
if (permerror (DECL_SOURCE_LOCATION (decl),
"static data member %qD in unnamed class", decl))
inform (DECL_SOURCE_LOCATION (TYPE_NAME (t)),
"unnamed class defined here");
break;
}
DECL_IN_AGGR_P (decl) = 1;
if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
&& TYPE_DOMAIN (TREE_TYPE (decl)) == NULL_TREE)
SET_VAR_HAD_UNKNOWN_BOUND (decl);
if (init)
{
/* Similarly to start_decl_1, we want to complete the type in order
to do the right thing in cp_apply_type_quals_to_decl, possibly
clear TYPE_QUAL_CONST (c++/65579). */
tree type = TREE_TYPE (decl) = complete_type (TREE_TYPE (decl));
cp_apply_type_quals_to_decl (cp_type_quals (type), decl);
}
cp_finish_decl (decl, init, init_const_expr_p, asmspec_tree, flags);
}
/* DECLARATOR and DECLSPECS correspond to a class member. The other
parameters are as for cp_finish_decl. Return the DECL for the
class member declared. */
tree
grokfield (const cp_declarator *declarator,
cp_decl_specifier_seq *declspecs,
tree init, bool init_const_expr_p,
tree asmspec_tree,
tree attrlist)
{
tree value;
const char *asmspec = 0;
int flags;
tree name;
if (init
&& TREE_CODE (init) == TREE_LIST
&& TREE_VALUE (init) == error_mark_node
&& TREE_CHAIN (init) == NULL_TREE)
init = NULL_TREE;
value = grokdeclarator (declarator, declspecs, FIELD, init != 0, &attrlist);
if (! value || value == error_mark_node)
/* friend or constructor went bad. */
return error_mark_node;
if (TREE_TYPE (value) == error_mark_node)
return value;
if (TREE_CODE (value) == TYPE_DECL && init)
{
error ("typedef %qD is initialized (use decltype instead)", value);
init = NULL_TREE;
}
/* Pass friendly classes back. */
if (value == void_type_node)
return value;
name = DECL_NAME (value);
if (name != NULL_TREE)
{
if (TREE_CODE (name) == TEMPLATE_ID_EXPR)
{
error ("explicit template argument list not allowed");
return error_mark_node;
}
if (IDENTIFIER_POINTER (name)[0] == '_'
&& id_equal (name, "_vptr"))
error ("member %qD conflicts with virtual function table field name",
value);
}
/* Stash away type declarations. */
if (TREE_CODE (value) == TYPE_DECL)
{
DECL_NONLOCAL (value) = 1;
DECL_CONTEXT (value) = current_class_type;
if (attrlist)
{
int attrflags = 0;
/* If this is a typedef that names the class for linkage purposes
(7.1.3p8), apply any attributes directly to the type. */
if (OVERLOAD_TYPE_P (TREE_TYPE (value))
&& value == TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (value))))
attrflags = ATTR_FLAG_TYPE_IN_PLACE;
cplus_decl_attributes (&value, attrlist, attrflags);
}
if (decl_spec_seq_has_spec_p (declspecs, ds_typedef)
&& TREE_TYPE (value) != error_mark_node
&& TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (value))) != value)
set_underlying_type (value);
/* It's important that push_template_decl below follows
set_underlying_type above so that the created template
carries the properly set type of VALUE. */
if (processing_template_decl)
value = push_template_decl (value);
record_locally_defined_typedef (value);
return value;
}
int friendp = decl_spec_seq_has_spec_p (declspecs, ds_friend);
if (!friendp && DECL_IN_AGGR_P (value))
{
error ("%qD is already defined in %qT", value, DECL_CONTEXT (value));
return void_type_node;
}
if (asmspec_tree && asmspec_tree != error_mark_node)
asmspec = TREE_STRING_POINTER (asmspec_tree);
if (init)
{
if (TREE_CODE (value) == FUNCTION_DECL)
{
if (init == ridpointers[(int)RID_DELETE])
{
if (friendp && decl_defined_p (value))
{
error ("redefinition of %q#D", value);
inform (DECL_SOURCE_LOCATION (value),
"%q#D previously defined here", value);
}
else
{
DECL_DELETED_FN (value) = 1;
DECL_DECLARED_INLINE_P (value) = 1;
DECL_INITIAL (value) = error_mark_node;
}
}
else if (init == ridpointers[(int)RID_DEFAULT])
{
if (defaultable_fn_check (value))
{
DECL_DEFAULTED_FN (value) = 1;
DECL_INITIALIZED_IN_CLASS_P (value) = 1;
DECL_DECLARED_INLINE_P (value) = 1;
}
}
else if (TREE_CODE (init) == DEFAULT_ARG)
error ("invalid initializer for member function %qD", value);
else if (TREE_CODE (TREE_TYPE (value)) == METHOD_TYPE)
{
if (integer_zerop (init))
DECL_PURE_VIRTUAL_P (value) = 1;
else if (error_operand_p (init))
; /* An error has already been reported. */
else
error ("invalid initializer for member function %qD",
value);
}
else
{
gcc_assert (TREE_CODE (TREE_TYPE (value)) == FUNCTION_TYPE);
if (friendp)
error ("initializer specified for friend function %qD",
value);
else
error ("initializer specified for static member function %qD",
value);
}
}
else if (TREE_CODE (value) == FIELD_DECL)
/* C++11 NSDMI, keep going. */;
else if (!VAR_P (value))
gcc_unreachable ();
}
/* Pass friend decls back. */
if ((TREE_CODE (value) == FUNCTION_DECL
|| TREE_CODE (value) == TEMPLATE_DECL)
&& DECL_CONTEXT (value) != current_class_type)
return value;
/* Need to set this before push_template_decl. */
if (VAR_P (value))
DECL_CONTEXT (value) = current_class_type;
if (processing_template_decl && VAR_OR_FUNCTION_DECL_P (value))
{
value = push_template_decl (value);
if (error_operand_p (value))
return error_mark_node;
}
if (attrlist)
cplus_decl_attributes (&value, attrlist, 0);
if (init && DIRECT_LIST_INIT_P (init))
flags = LOOKUP_NORMAL;
else
flags = LOOKUP_IMPLICIT;
switch (TREE_CODE (value))
{
case VAR_DECL:
finish_static_data_member_decl (value, init, init_const_expr_p,
asmspec_tree, flags);
return value;
case FIELD_DECL:
if (asmspec)
error ("%<asm%> specifiers are not permitted on non-static data members");
if (DECL_INITIAL (value) == error_mark_node)
init = error_mark_node;
cp_finish_decl (value, init, /*init_const_expr_p=*/false,
NULL_TREE, flags);
DECL_IN_AGGR_P (value) = 1;
return value;
case FUNCTION_DECL:
if (asmspec)
set_user_assembler_name (value, asmspec);
cp_finish_decl (value,
/*init=*/NULL_TREE,
/*init_const_expr_p=*/false,
asmspec_tree, flags);
/* Pass friends back this way. */
if (DECL_FRIEND_P (value))
return void_type_node;
DECL_IN_AGGR_P (value) = 1;
return value;
default:
gcc_unreachable ();
}
return NULL_TREE;
}
/* Like `grokfield', but for bitfields.
WIDTH is the width of the bitfield, a constant expression.
The other parameters are as for grokfield. */
tree
grokbitfield (const cp_declarator *declarator,
cp_decl_specifier_seq *declspecs, tree width, tree init,
tree attrlist)
{
tree value = grokdeclarator (declarator, declspecs, BITFIELD,
init != NULL_TREE, &attrlist);
if (value == error_mark_node)
return NULL_TREE; /* friends went bad. */
if (TREE_TYPE (value) == error_mark_node)
return value;
/* Pass friendly classes back. */
if (VOID_TYPE_P (value))
return void_type_node;
if (!INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (value))
&& (POINTER_TYPE_P (value)
|| !dependent_type_p (TREE_TYPE (value))))
{
error ("bit-field %qD with non-integral type", value);
return error_mark_node;
}
if (TREE_CODE (value) == TYPE_DECL)
{
error ("cannot declare %qD to be a bit-field type", value);
return NULL_TREE;
}
/* Usually, finish_struct_1 catches bitfields with invalid types.
But, in the case of bitfields with function type, we confuse
ourselves into thinking they are member functions, so we must
check here. */
if (TREE_CODE (value) == FUNCTION_DECL)
{
error ("cannot declare bit-field %qD with function type",
DECL_NAME (value));
return NULL_TREE;
}
if (width && TYPE_WARN_IF_NOT_ALIGN (TREE_TYPE (value)))
{
error ("cannot declare bit-field %qD with %<warn_if_not_aligned%> type",
DECL_NAME (value));
return NULL_TREE;
}
if (DECL_IN_AGGR_P (value))
{
error ("%qD is already defined in the class %qT", value,
DECL_CONTEXT (value));
return void_type_node;
}
if (TREE_STATIC (value))
{
error ("static member %qD cannot be a bit-field", value);
return NULL_TREE;
}
int flags = LOOKUP_IMPLICIT;
if (init && DIRECT_LIST_INIT_P (init))
flags = LOOKUP_NORMAL;
cp_finish_decl (value, init, false, NULL_TREE, flags);
if (width != error_mark_node)
{
/* The width must be an integer type. */
if (!type_dependent_expression_p (width)
&& !INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (TREE_TYPE (width)))
error ("width of bit-field %qD has non-integral type %qT", value,
TREE_TYPE (width));
else
{
/* Temporarily stash the width in DECL_BIT_FIELD_REPRESENTATIVE.
check_bitfield_decl picks it from there later and sets DECL_SIZE
accordingly. */
DECL_BIT_FIELD_REPRESENTATIVE (value) = width;
SET_DECL_C_BIT_FIELD (value);
}
}
DECL_IN_AGGR_P (value) = 1;
if (attrlist)
cplus_decl_attributes (&value, attrlist, /*flags=*/0);
return value;
}
/* Returns true iff ATTR is an attribute which needs to be applied at
instantiation time rather than template definition time. */
static bool
is_late_template_attribute (tree attr, tree decl)
{
tree name = get_attribute_name (attr);
tree args = TREE_VALUE (attr);
const struct attribute_spec *spec = lookup_attribute_spec (name);
tree arg;
if (!spec)
/* Unknown attribute. */
return false;
/* Attribute weak handling wants to write out assembly right away. */
if (is_attribute_p ("weak", name))
return true;
/* Attributes used and unused are applied directly to typedefs for the
benefit of maybe_warn_unused_local_typedefs. */
if (TREE_CODE (decl) == TYPE_DECL
&& (is_attribute_p ("unused", name)
|| is_attribute_p ("used", name)))
return false;
/* Attribute tls_model wants to modify the symtab. */
if (is_attribute_p ("tls_model", name))
return true;
/* #pragma omp declare simd attribute needs to be always deferred. */
if (flag_openmp
&& is_attribute_p ("omp declare simd", name))
return true;
/* An attribute pack is clearly dependent. */
if (args && PACK_EXPANSION_P (args))
return true;
/* If any of the arguments are dependent expressions, we can't evaluate
the attribute until instantiation time. */
for (arg = args; arg; arg = TREE_CHAIN (arg))
{
tree t = TREE_VALUE (arg);
/* If the first attribute argument is an identifier, only consider
second and following arguments. Attributes like mode, format,
cleanup and several target specific attributes aren't late
just because they have an IDENTIFIER_NODE as first argument. */
if (arg == args && attribute_takes_identifier_p (name)
&& identifier_p (t))
continue;
if (value_dependent_expression_p (t)
|| type_dependent_expression_p (t))
return true;
}
if (TREE_CODE (decl) == TYPE_DECL
|| TYPE_P (decl)
|| spec->type_required)
{
tree type = TYPE_P (decl) ? decl : TREE_TYPE (decl);
/* We can't apply any attributes to a completely unknown type until
instantiation time. */
enum tree_code code = TREE_CODE (type);
if (code == TEMPLATE_TYPE_PARM
|| code == BOUND_TEMPLATE_TEMPLATE_PARM
|| code == TYPENAME_TYPE)
return true;
/* Also defer most attributes on dependent types. This is not
necessary in all cases, but is the better default. */
else if (dependent_type_p (type)
/* But some attributes specifically apply to templates. */
&& !is_attribute_p ("abi_tag", name)
&& !is_attribute_p ("deprecated", name)
&& !is_attribute_p ("visibility", name))
return true;
else
return false;
}
else
return false;
}
/* ATTR_P is a list of attributes. Remove any attributes which need to be
applied at instantiation time and return them. If IS_DEPENDENT is true,
the declaration itself is dependent, so all attributes should be applied
at instantiation time. */
static tree
splice_template_attributes (tree *attr_p, tree decl)
{
tree *p = attr_p;
tree late_attrs = NULL_TREE;
tree *q = &late_attrs;
if (!p)
return NULL_TREE;
for (; *p; )
{
if (is_late_template_attribute (*p, decl))
{
ATTR_IS_DEPENDENT (*p) = 1;
*q = *p;
*p = TREE_CHAIN (*p);
q = &TREE_CHAIN (*q);
*q = NULL_TREE;
}
else
p = &TREE_CHAIN (*p);
}
return late_attrs;
}
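/* The splice above uses the classic pointer-to-pointer idiom: P walks the
   chain by address, so unlinking *P is a single store with no special case
   for the list head, while Q appends each late attribute to the tail of
   LATE_ATTRS in the order encountered.  */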
/* Remove any late attributes from the list in ATTR_P and attach them to
DECL_P. */
static void
save_template_attributes (tree *attr_p, tree *decl_p, int flags)
{
tree *q;
if (attr_p && *attr_p == error_mark_node)
return;
tree late_attrs = splice_template_attributes (attr_p, *decl_p);
if (!late_attrs)
return;
if (DECL_P (*decl_p))
q = &DECL_ATTRIBUTES (*decl_p);
else
q = &TYPE_ATTRIBUTES (*decl_p);
tree old_attrs = *q;
/* Merge the late attributes at the beginning with the attribute
list. */
late_attrs = merge_attributes (late_attrs, *q);
if (*q != late_attrs
&& !DECL_P (*decl_p)
&& !(flags & ATTR_FLAG_TYPE_IN_PLACE))
{
if (!dependent_type_p (*decl_p))
*decl_p = cp_build_type_attribute_variant (*decl_p, late_attrs);
else
{
*decl_p = build_variant_type_copy (*decl_p);
TYPE_ATTRIBUTES (*decl_p) = late_attrs;
}
}
else
*q = late_attrs;
if (!DECL_P (*decl_p) && *decl_p == TYPE_MAIN_VARIANT (*decl_p))
{
/* We've added new attributes directly to the main variant, so
now we need to update all of the other variants to include
these new attributes. */
tree variant;
for (variant = TYPE_NEXT_VARIANT (*decl_p); variant;
variant = TYPE_NEXT_VARIANT (variant))
{
gcc_assert (TYPE_ATTRIBUTES (variant) == old_attrs);
TYPE_ATTRIBUTES (variant) = TYPE_ATTRIBUTES (*decl_p);
}
}
}
/* True if ATTRS contains any dependent attributes that affect type
identity. */
bool
any_dependent_type_attributes_p (tree attrs)
{
for (tree a = attrs; a; a = TREE_CHAIN (a))
if (ATTR_IS_DEPENDENT (a))
{
const attribute_spec *as = lookup_attribute_spec (TREE_PURPOSE (a));
if (as && as->affects_type_identity)
return true;
}
return false;
}
/* Return true iff ATTRS are acceptable attributes to be applied in-place
to a typedef which gives a previously unnamed class or enum a name for
linkage purposes. */
bool
attributes_naming_typedef_ok (tree attrs)
{
for (; attrs; attrs = TREE_CHAIN (attrs))
{
tree name = get_attribute_name (attrs);
if (is_attribute_p ("vector_size", name))
return false;
}
return true;
}
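/* For instance, in
     typedef struct { int i; } T;
   T gives the unnamed struct a name for linkage purposes. An attribute
   such as vector_size builds a new distinct type rather than decorating
   the original in place, so it cannot appear on such a typedef. */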
/* Like reconstruct_complex_type, but handle also template trees. */
tree
cp_reconstruct_complex_type (tree type, tree bottom)
{
tree inner, outer;
bool late_return_type_p = false;
if (TYPE_PTR_P (type))
{
inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
outer = build_pointer_type_for_mode (inner, TYPE_MODE (type),
TYPE_REF_CAN_ALIAS_ALL (type));
}
else if (TREE_CODE (type) == REFERENCE_TYPE)
{
inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
outer = build_reference_type_for_mode (inner, TYPE_MODE (type),
TYPE_REF_CAN_ALIAS_ALL (type));
}
else if (TREE_CODE (type) == ARRAY_TYPE)
{
inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
outer = build_cplus_array_type (inner, TYPE_DOMAIN (type));
/* Don't call cp_build_qualified_type on ARRAY_TYPEs, the
element type qualification will be handled by the recursive
cp_reconstruct_complex_type call and cp_build_qualified_type
for ARRAY_TYPEs changes the element type. */
return outer;
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
{
late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (type);
inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
outer = build_function_type (inner, TYPE_ARG_TYPES (type));
outer = apply_memfn_quals (outer,
type_memfn_quals (type),
type_memfn_rqual (type));
}
else if (TREE_CODE (type) == METHOD_TYPE)
{
late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (type);
inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
/* The build_method_type_directly() routine prepends 'this' to the
   argument list, so we must compensate by getting rid of it. */
outer
= build_method_type_directly
(class_of_this_parm (type), inner,
TREE_CHAIN (TYPE_ARG_TYPES (type)));
}
else if (TREE_CODE (type) == OFFSET_TYPE)
{
inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
outer = build_offset_type (TYPE_OFFSET_BASETYPE (type), inner);
}
else
return bottom;
if (TYPE_ATTRIBUTES (type))
outer = cp_build_type_attribute_variant (outer, TYPE_ATTRIBUTES (type));
outer = cp_build_qualified_type (outer, cp_type_quals (type));
if (late_return_type_p)
TYPE_HAS_LATE_RETURN_TYPE (outer) = 1;
return outer;
}
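/* A rough sketch of the reconstruction above: given TYPE of float* and
   BOTTOM of _Complex float, the wrapper layers are rebuilt around the
   new innermost type, yielding _Complex float*, while qualifiers,
   attributes, and any late return type are preserved. */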
/* Replace any constant expressions that may appear in the attributes'
   arguments with their reduced values. */
static void
cp_check_const_attributes (tree attributes)
{
if (attributes == error_mark_node)
return;
tree attr;
for (attr = attributes; attr; attr = TREE_CHAIN (attr))
{
tree arg;
for (arg = TREE_VALUE (attr); arg; arg = TREE_CHAIN (arg))
{
tree expr = TREE_VALUE (arg);
if (EXPR_P (expr))
TREE_VALUE (arg) = fold_non_dependent_expr (expr);
}
}
}
/* Return true if TYPE is an OpenMP mappable type. */
bool
cp_omp_mappable_type (tree type)
{
/* A mappable type has to be complete. */
if (type == error_mark_node || !COMPLETE_TYPE_P (type))
return false;
/* Arrays have mappable type if the elements have mappable type. */
while (TREE_CODE (type) == ARRAY_TYPE)
type = TREE_TYPE (type);
/* A mappable type cannot contain virtual members. */
if (CLASS_TYPE_P (type) && CLASSTYPE_VTABLES (type))
return false;
/* All data members must be non-static. */
if (CLASS_TYPE_P (type))
{
tree field;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
if (VAR_P (field))
return false;
/* All fields must have mappable types. */
else if (TREE_CODE (field) == FIELD_DECL
&& !cp_omp_mappable_type (TREE_TYPE (field)))
return false;
}
return true;
}
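/* For example, types like
     struct S { virtual void f (); }; // not mappable: has vtables
     struct T { static int n; }; // not mappable: static data member
   are rejected above, while a plain aggregate whose fields all have
   mappable types is accepted. */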
/* Return the last pushed declaration for the symbol DECL or NULL
when no such declaration exists. */
static tree
find_last_decl (tree decl)
{
tree last_decl = NULL_TREE;
if (tree name = DECL_P (decl) ? DECL_NAME (decl) : NULL_TREE)
{
/* Look up the declaration in its scope. */
tree pushed_scope = NULL_TREE;
if (tree ctype = DECL_CONTEXT (decl))
pushed_scope = push_scope (ctype);
last_decl = lookup_name (name);
if (pushed_scope)
pop_scope (pushed_scope);
/* The declaration may be a member conversion operator
or a bunch of overloads (handle the latter below). */
if (last_decl && BASELINK_P (last_decl))
last_decl = BASELINK_FUNCTIONS (last_decl);
}
if (!last_decl)
return NULL_TREE;
if (DECL_P (last_decl) || TREE_CODE (last_decl) == OVERLOAD)
{
/* A set of overloads of the same function. */
for (lkp_iterator iter (last_decl); iter; ++iter)
{
if (TREE_CODE (*iter) == OVERLOAD)
continue;
if (decls_match (decl, *iter, /*record_decls=*/false))
return *iter;
}
return NULL_TREE;
}
return NULL_TREE;
}
/* Like decl_attributes, but handle C++ complexity. */
void
cplus_decl_attributes (tree *decl, tree attributes, int flags)
{
if (*decl == NULL_TREE || *decl == void_type_node
|| *decl == error_mark_node)
return;
/* Add implicit "omp declare target" attribute if requested. */
if (scope_chain->omp_declare_target_attribute
&& ((VAR_P (*decl)
&& (TREE_STATIC (*decl) || DECL_EXTERNAL (*decl)))
|| TREE_CODE (*decl) == FUNCTION_DECL))
{
if (VAR_P (*decl)
&& DECL_CLASS_SCOPE_P (*decl))
error ("%q+D static data member inside of declare target directive",
*decl);
else if (VAR_P (*decl)
&& (processing_template_decl
|| !cp_omp_mappable_type (TREE_TYPE (*decl))))
attributes = tree_cons (get_identifier ("omp declare target implicit"),
NULL_TREE, attributes);
else
attributes = tree_cons (get_identifier ("omp declare target"),
NULL_TREE, attributes);
}
if (processing_template_decl)
{
if (check_for_bare_parameter_packs (attributes))
return;
save_template_attributes (&attributes, decl, flags);
}
cp_check_const_attributes (attributes);
if (TREE_CODE (*decl) == TEMPLATE_DECL)
decl = &DECL_TEMPLATE_RESULT (*decl);
if (TREE_TYPE (*decl) && TYPE_PTRMEMFUNC_P (TREE_TYPE (*decl)))
{
attributes
= decl_attributes (decl, attributes, flags | ATTR_FLAG_FUNCTION_NEXT);
decl_attributes (&TYPE_PTRMEMFUNC_FN_TYPE_RAW (TREE_TYPE (*decl)),
attributes, flags);
}
else
{
tree last_decl = find_last_decl (*decl);
decl_attributes (decl, attributes, flags, last_decl);
}
if (TREE_CODE (*decl) == TYPE_DECL)
SET_IDENTIFIER_TYPE_VALUE (DECL_NAME (*decl), TREE_TYPE (*decl));
/* Propagate deprecation out to the template. */
if (TREE_DEPRECATED (*decl))
if (tree ti = get_template_info (*decl))
{
tree tmpl = TI_TEMPLATE (ti);
tree pattern = (TYPE_P (*decl) ? TREE_TYPE (tmpl)
: DECL_TEMPLATE_RESULT (tmpl));
if (*decl == pattern)
TREE_DEPRECATED (tmpl) = true;
}
}
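/* E.g. inside
     #pragma omp declare target
     int v;
     #pragma omp end declare target
   the variable v receives an implicit "omp declare target" attribute
   via the code above. */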
/* Walks through the namespace- or function-scope anonymous union
OBJECT, with the indicated TYPE, building appropriate VAR_DECLs.
Returns one of the fields for use in the mangled name. */
static tree
build_anon_union_vars (tree type, tree object)
{
tree main_decl = NULL_TREE;
tree field;
/* Rather than write the code to handle the non-union case,
just give an error. */
if (TREE_CODE (type) != UNION_TYPE)
{
error ("anonymous struct not inside named type");
return error_mark_node;
}
for (field = TYPE_FIELDS (type);
field != NULL_TREE;
field = DECL_CHAIN (field))
{
tree decl;
tree ref;
if (DECL_ARTIFICIAL (field))
continue;
if (TREE_CODE (field) != FIELD_DECL)
{
permerror (DECL_SOURCE_LOCATION (field),
"%q#D invalid; an anonymous union can only "
"have non-static data members", field);
continue;
}
if (TREE_PRIVATE (field))
permerror (DECL_SOURCE_LOCATION (field),
"private member %q#D in anonymous union", field);
else if (TREE_PROTECTED (field))
permerror (DECL_SOURCE_LOCATION (field),
"protected member %q#D in anonymous union", field);
if (processing_template_decl)
ref = build_min_nt_loc (UNKNOWN_LOCATION, COMPONENT_REF, object,
DECL_NAME (field), NULL_TREE);
else
ref = build_class_member_access_expr (object, field, NULL_TREE,
false, tf_warning_or_error);
if (DECL_NAME (field))
{
tree base;
decl = build_decl (input_location,
VAR_DECL, DECL_NAME (field), TREE_TYPE (field));
DECL_ANON_UNION_VAR_P (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
base = get_base_address (object);
TREE_PUBLIC (decl) = TREE_PUBLIC (base);
TREE_STATIC (decl) = TREE_STATIC (base);
DECL_EXTERNAL (decl) = DECL_EXTERNAL (base);
SET_DECL_VALUE_EXPR (decl, ref);
DECL_HAS_VALUE_EXPR_P (decl) = 1;
decl = pushdecl (decl);
}
else if (ANON_AGGR_TYPE_P (TREE_TYPE (field)))
decl = build_anon_union_vars (TREE_TYPE (field), ref);
else
decl = 0;
if (main_decl == NULL_TREE)
main_decl = decl;
}
return main_decl;
}
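/* E.g. for a namespace-scope
     static union { int i; double d; };
   this builds VAR_DECLs for i and d whose DECL_VALUE_EXPRs are
   COMPONENT_REFs into the anonymous union object, so uses of i and d
   compile into accesses to the shared storage. */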
/* Finish off the processing of a UNION_TYPE structure. If the union is an
   anonymous union, then all members must be laid out together. A
   namespace-scope anonymous union must be declared static; this is
   diagnosed below. */
void
finish_anon_union (tree anon_union_decl)
{
tree type;
tree main_decl;
bool public_p;
if (anon_union_decl == error_mark_node)
return;
type = TREE_TYPE (anon_union_decl);
public_p = TREE_PUBLIC (anon_union_decl);
/* The VAR_DECL's context is the same as the TYPE's context. */
DECL_CONTEXT (anon_union_decl) = DECL_CONTEXT (TYPE_NAME (type));
if (TYPE_FIELDS (type) == NULL_TREE)
return;
if (public_p)
{
error ("namespace-scope anonymous aggregates must be static");
return;
}
main_decl = build_anon_union_vars (type, anon_union_decl);
if (main_decl == error_mark_node)
return;
if (main_decl == NULL_TREE)
{
pedwarn (input_location, 0, "anonymous union with no members");
return;
}
if (!processing_template_decl)
{
/* Use main_decl to set the mangled name. */
DECL_NAME (anon_union_decl) = DECL_NAME (main_decl);
maybe_commonize_var (anon_union_decl);
if (TREE_STATIC (anon_union_decl) || DECL_EXTERNAL (anon_union_decl))
mangle_decl (anon_union_decl);
DECL_NAME (anon_union_decl) = NULL_TREE;
}
pushdecl (anon_union_decl);
cp_finish_decl (anon_union_decl, NULL_TREE, false, NULL_TREE, 0);
}
/* Auxiliary functions to make type signatures for
`operator new' and `operator delete' correspond to
what the compiler will be expecting. */
tree
coerce_new_type (tree type)
{
int e = 0;
tree args = TYPE_ARG_TYPES (type);
gcc_assert (TREE_CODE (type) == FUNCTION_TYPE);
if (!same_type_p (TREE_TYPE (type), ptr_type_node))
{
e = 1;
error ("%<operator new%> must return type %qT", ptr_type_node);
}
if (args && args != void_list_node)
{
if (TREE_PURPOSE (args))
{
/* [basic.stc.dynamic.allocation]
The first parameter shall not have an associated default
argument. */
error ("the first parameter of %<operator new%> cannot "
"have a default argument");
/* Throw away the default argument. */
TREE_PURPOSE (args) = NULL_TREE;
}
if (!same_type_p (TREE_VALUE (args), size_type_node))
{
e = 2;
args = TREE_CHAIN (args);
}
}
else
e = 2;
if (e == 2)
permerror (input_location, "%<operator new%> takes type %<size_t%> (%qT) "
"as first parameter", size_type_node);
switch (e)
{
case 2:
args = tree_cons (NULL_TREE, size_type_node, args);
/* Fall through. */
case 1:
type = build_exception_variant
(build_function_type (ptr_type_node, args),
TYPE_RAISES_EXCEPTIONS (type));
/* Fall through. */
default:;
}
return type;
}
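/* For instance, a declaration along the lines of
     void *operator new (char); // wrong first parameter type
   is diagnosed above and its signature repaired by substituting size_t
   for the first parameter, so later code can rely on the canonical
   void *operator new (size_t) shape. */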
tree
coerce_delete_type (tree type)
{
int e = 0;
tree args = TYPE_ARG_TYPES (type);
gcc_assert (TREE_CODE (type) == FUNCTION_TYPE);
if (!same_type_p (TREE_TYPE (type), void_type_node))
{
e = 1;
error ("%<operator delete%> must return type %qT", void_type_node);
}
if (!args || args == void_list_node
|| !same_type_p (TREE_VALUE (args), ptr_type_node))
{
e = 2;
if (args && args != void_list_node)
args = TREE_CHAIN (args);
error ("%<operator delete%> takes type %qT as first parameter",
ptr_type_node);
}
switch (e)
{
case 2:
args = tree_cons (NULL_TREE, ptr_type_node, args);
/* Fall through. */
case 1:
type = build_exception_variant
(build_function_type (void_type_node, args),
TYPE_RAISES_EXCEPTIONS (type));
/* Fall through. */
default:;
}
return type;
}
/* DECL is a VAR_DECL for a vtable: walk through the entries in the vtable
and mark them as needed. */
static void
mark_vtable_entries (tree decl)
{
tree fnaddr;
unsigned HOST_WIDE_INT idx;
/* It's OK for the vtable to refer to deprecated virtual functions. */
warning_sentinel w(warn_deprecated_decl);
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (DECL_INITIAL (decl)),
idx, fnaddr)
{
tree fn;
STRIP_NOPS (fnaddr);
if (TREE_CODE (fnaddr) != ADDR_EXPR
&& TREE_CODE (fnaddr) != FDESC_EXPR)
/* This entry is an offset: a virtual base class offset, a
virtual call offset, an RTTI offset, etc. */
continue;
fn = TREE_OPERAND (fnaddr, 0);
TREE_ADDRESSABLE (fn) = 1;
/* When we don't have vcall offsets, we output thunks whenever
we output the vtables that contain them. With vcall offsets,
we know all the thunks we'll need when we emit a virtual
function, so we emit the thunks there instead. */
if (DECL_THUNK_P (fn))
use_thunk (fn, /*emit_p=*/0);
/* Set the location, as marking the function could cause
instantiation. We do not need to preserve the incoming
location, as we're called from c_parse_final_cleanups, which
takes care of that. */
input_location = DECL_SOURCE_LOCATION (fn);
mark_used (fn);
}
}
/* Set DECL up to have the closest approximation of "initialized common"
linkage available. */
void
comdat_linkage (tree decl)
{
if (flag_weak)
make_decl_one_only (decl, cxx_comdat_group (decl));
else if (TREE_CODE (decl) == FUNCTION_DECL
|| (VAR_P (decl) && DECL_ARTIFICIAL (decl)))
/* We can just emit functions and compiler-generated variables
statically; having multiple copies is (for the most part) only
a waste of space.
There are two correctness issues, however: the address of a
template instantiation with external linkage should be the
same, independent of what translation unit asks for the
address, and this will not hold when we emit multiple copies of
the function. However, there's little else we can do.
Also, by default, the typeinfo implementation assumes that
there will be only one copy of the string used as the name for
each type. Therefore, if weak symbols are unavailable, the
run-time library should perform a more conservative check; it
should perform a string comparison, rather than an address
comparison. */
TREE_PUBLIC (decl) = 0;
else
{
/* Static data member template instantiations, however, cannot
have multiple copies. */
if (DECL_INITIAL (decl) == 0
|| DECL_INITIAL (decl) == error_mark_node)
DECL_COMMON (decl) = 1;
else if (EMPTY_CONSTRUCTOR_P (DECL_INITIAL (decl)))
{
DECL_COMMON (decl) = 1;
DECL_INITIAL (decl) = error_mark_node;
}
else if (!DECL_EXPLICIT_INSTANTIATION (decl))
{
/* We can't do anything useful; leave vars for explicit
instantiation. */
DECL_EXTERNAL (decl) = 1;
DECL_NOT_REALLY_EXTERN (decl) = 0;
}
}
if (TREE_PUBLIC (decl))
DECL_COMDAT (decl) = 1;
}
/* For win32 we also want to put explicit instantiations in
linkonce sections, so that they will be merged with implicit
instantiations; otherwise we get duplicate symbol errors.
For Darwin we do not want explicit instantiations to be
linkonce. */
void
maybe_make_one_only (tree decl)
{
/* We used to say that this was not necessary on targets that support weak
symbols, because the implicit instantiations will defer to the explicit
one. However, that's not actually the case in SVR4; a strong definition
after a weak one is an error. Also, not making explicit
instantiations one_only means that we can end up with two copies of
some template instantiations. */
if (! flag_weak)
return;
/* We can't set DECL_COMDAT on functions, or cp_finish_file will think
we can get away with not emitting them if they aren't used. We need
to for variables so that cp_finish_decl will update their linkage,
because their DECL_INITIAL may not have been set properly yet. */
if (!TARGET_WEAK_NOT_IN_ARCHIVE_TOC
|| (! DECL_EXPLICIT_INSTANTIATION (decl)
&& ! DECL_TEMPLATE_SPECIALIZATION (decl)))
{
make_decl_one_only (decl, cxx_comdat_group (decl));
if (VAR_P (decl))
{
varpool_node *node = varpool_node::get_create (decl);
DECL_COMDAT (decl) = 1;
/* Mark it needed so we don't forget to emit it. */
node->forced_by_abi = true;
TREE_USED (decl) = 1;
}
}
}
/* Returns true iff DECL, a FUNCTION_DECL or VAR_DECL, has vague linkage.
This predicate will give the right answer during parsing of the
function, which other tests may not. */
bool
vague_linkage_p (tree decl)
{
if (!TREE_PUBLIC (decl))
{
/* maybe_thunk_body clears TREE_PUBLIC and DECL_ABSTRACT_P on the
maybe-in-charge 'tor variants; in that case we need to check one of
the "clones" for the real linkage. But only in that case; before
maybe_clone_body we haven't yet copied the linkage to the clones. */
if ((DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (decl)
|| DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (decl))
&& !DECL_ABSTRACT_P (decl)
&& DECL_CHAIN (decl)
&& DECL_CLONED_FUNCTION_P (DECL_CHAIN (decl)))
return vague_linkage_p (DECL_CHAIN (decl));
gcc_checking_assert (!DECL_COMDAT (decl));
return false;
}
/* Unfortunately, import_export_decl has not always been called
before the function is processed, so we cannot simply check
DECL_COMDAT. */
if (DECL_COMDAT (decl)
|| (TREE_CODE (decl) == FUNCTION_DECL
&& DECL_DECLARED_INLINE_P (decl))
|| (DECL_LANG_SPECIFIC (decl)
&& DECL_TEMPLATE_INSTANTIATION (decl))
|| (VAR_P (decl) && DECL_INLINE_VAR_P (decl)))
return true;
else if (DECL_FUNCTION_SCOPE_P (decl))
/* A local static in an inline effectively has vague linkage. */
return (TREE_STATIC (decl)
&& vague_linkage_p (DECL_CONTEXT (decl)));
else
return false;
}
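/* Typical entities with vague linkage: an inline function defined in a
   header, an implicit template instantiation, a C++17 inline variable.
   Each may be emitted in several translation units, and the linker is
   expected to merge the copies. */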
/* Determine whether or not we want to specifically import or export CTYPE,
using various heuristics. */
static void
import_export_class (tree ctype)
{
/* -1 for imported, 1 for exported. */
int import_export = 0;
/* It only makes sense to call this function at EOF. The reason is
that this function looks at whether or not the first non-inline
non-abstract virtual member function has been defined in this
translation unit. But, we can't possibly know that until we've
seen the entire translation unit. */
gcc_assert (at_eof);
if (CLASSTYPE_INTERFACE_KNOWN (ctype))
return;
/* If MULTIPLE_SYMBOL_SPACES is set and we saw a #pragma interface,
we will have CLASSTYPE_INTERFACE_ONLY set but not
CLASSTYPE_INTERFACE_KNOWN. In that case, we don't want to use this
heuristic because someone will supply a #pragma implementation
elsewhere, and deducing it here would produce a conflict. */
if (CLASSTYPE_INTERFACE_ONLY (ctype))
return;
if (lookup_attribute ("dllimport", TYPE_ATTRIBUTES (ctype)))
import_export = -1;
else if (lookup_attribute ("dllexport", TYPE_ATTRIBUTES (ctype)))
import_export = 1;
else if (CLASSTYPE_IMPLICIT_INSTANTIATION (ctype)
&& !flag_implicit_templates)
/* For a template class, without -fimplicit-templates, check the
repository. If the virtual table is assigned to this
translation unit, then export the class; otherwise, import
it. */
import_export = repo_export_class_p (ctype) ? 1 : -1;
else if (TYPE_POLYMORPHIC_P (ctype))
{
/* The ABI specifies that the virtual table and associated
information are emitted with the key method, if any. */
tree method = CLASSTYPE_KEY_METHOD (ctype);
/* If weak symbol support is not available, then we must be
careful not to emit the vtable when the key function is
inline. An inline function can be defined in multiple
translation units. If we were to emit the vtable in each
translation unit containing a definition, we would get
multiple definition errors at link-time. */
if (method && (flag_weak || ! DECL_DECLARED_INLINE_P (method)))
import_export = (DECL_REALLY_EXTERN (method) ? -1 : 1);
}
/* When MULTIPLE_SYMBOL_SPACES is set, we cannot count on seeing
a definition anywhere else. */
if (MULTIPLE_SYMBOL_SPACES && import_export == -1)
import_export = 0;
/* Allow back ends the chance to overrule the decision. */
if (targetm.cxx.import_export_class)
import_export = targetm.cxx.import_export_class (ctype, import_export);
if (import_export)
{
SET_CLASSTYPE_INTERFACE_KNOWN (ctype);
CLASSTYPE_INTERFACE_ONLY (ctype) = (import_export < 0);
}
}
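/* Under the key-method heuristic used above, for
     struct S { virtual void f (); virtual void g () { } };
   S::f is the key method (the first non-inline virtual function), and
   the translation unit defining S::f exports the vtable and associated
   data for S while other translation units import them. */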
/* Return true if VAR has already been provided to the back end; in that
case VAR should not be modified further by the front end. */
static bool
var_finalized_p (tree var)
{
return varpool_node::get_create (var)->definition;
}
/* DECL is a VAR_DECL or FUNCTION_DECL which, for whatever reason,
must be emitted in this translation unit. Mark it as such. */
void
mark_needed (tree decl)
{
TREE_USED (decl) = 1;
if (TREE_CODE (decl) == FUNCTION_DECL)
{
/* Extern inline functions don't become needed when referenced.
If we know a method will be emitted in another TU and no new
functions can be marked reachable, just use the external
definition. */
struct cgraph_node *node = cgraph_node::get_create (decl);
node->forced_by_abi = true;
/* #pragma interface and -frepo code can call mark_needed for
maybe-in-charge 'tors; mark the clones as well. */
tree clone;
FOR_EACH_CLONE (clone, decl)
mark_needed (clone);
}
else if (VAR_P (decl))
{
varpool_node *node = varpool_node::get_create (decl);
/* The C++ front end uses mark_decl_references to force output of
   COMDAT variables that might otherwise appear dead. */
node->forced_by_abi = true;
}
}
/* DECL is either a FUNCTION_DECL or a VAR_DECL. This function
returns true if a definition of this entity should be provided in
this object file. Callers use this function to determine whether
or not to let the back end know that a definition of DECL is
available in this translation unit. */
bool
decl_needed_p (tree decl)
{
gcc_assert (VAR_OR_FUNCTION_DECL_P (decl));
/* This function should only be called at the end of the translation
unit. We cannot be sure of whether or not something will be
COMDAT until that point. */
gcc_assert (at_eof);
/* All entities with external linkage that are not COMDAT/EXTERN should be
emitted; they may be referred to from other object files. */
if (TREE_PUBLIC (decl) && !DECL_COMDAT (decl) && !DECL_REALLY_EXTERN (decl))
return true;
/* Functions marked "dllexport" must be emitted so that they are
visible to other DLLs. */
if (flag_keep_inline_dllexport
&& lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)))
return true;
/* When not optimizing, do not bother to produce definitions for extern
symbols. */
if (DECL_REALLY_EXTERN (decl)
&& ((TREE_CODE (decl) != FUNCTION_DECL
&& !optimize)
|| (TREE_CODE (decl) == FUNCTION_DECL
&& !opt_for_fn (decl, optimize)))
&& !lookup_attribute ("always_inline", decl))
return false;
/* If this entity was used, let the back end see it; it will decide
whether or not to emit it into the object file. */
if (TREE_USED (decl))
return true;
/* Virtual functions might be needed for devirtualization. */
if (flag_devirtualize
&& TREE_CODE (decl) == FUNCTION_DECL
&& DECL_VIRTUAL_P (decl))
return true;
/* Otherwise, DECL does not need to be emitted -- yet. A subsequent
reference to DECL might cause it to be emitted later. */
return false;
}
/* If necessary, write out the vtables for the dynamic class CTYPE.
Returns true if any vtables were emitted. */
static bool
maybe_emit_vtables (tree ctype)
{
tree vtbl;
tree primary_vtbl;
int needed = 0;
varpool_node *current = NULL, *last = NULL;
/* If the vtables for this class have already been emitted there is
nothing more to do. */
primary_vtbl = CLASSTYPE_VTABLES (ctype);
if (var_finalized_p (primary_vtbl))
return false;
/* Ignore dummy vtables made by get_vtable_decl. */
if (TREE_TYPE (primary_vtbl) == void_type_node)
return false;
/* On some targets, we cannot determine the key method until the end
of the translation unit -- which is when this function is
called. */
if (!targetm.cxx.key_method_may_be_inline ())
determine_key_method (ctype);
/* See if any of the vtables are needed. */
for (vtbl = CLASSTYPE_VTABLES (ctype); vtbl; vtbl = DECL_CHAIN (vtbl))
{
import_export_decl (vtbl);
if (DECL_NOT_REALLY_EXTERN (vtbl) && decl_needed_p (vtbl))
needed = 1;
}
if (!needed)
{
/* If the references to this class' vtables are optimized away,
still emit the appropriate debugging information. See
dfs_debug_mark. */
if (DECL_COMDAT (primary_vtbl)
&& CLASSTYPE_DEBUG_REQUESTED (ctype))
note_debug_info_needed (ctype);
return false;
}
/* The ABI requires that we emit all of the vtables if we emit any
of them. */
for (vtbl = CLASSTYPE_VTABLES (ctype); vtbl; vtbl = DECL_CHAIN (vtbl))
{
/* Mark entities referenced from the virtual table as used. */
mark_vtable_entries (vtbl);
if (TREE_TYPE (DECL_INITIAL (vtbl)) == 0)
{
vec<tree, va_gc> *cleanups = NULL;
tree expr = store_init_value (vtbl, DECL_INITIAL (vtbl), &cleanups,
LOOKUP_NORMAL);
/* It had better be all done at compile-time. */
gcc_assert (!expr && !cleanups);
}
/* Write it out. */
DECL_EXTERNAL (vtbl) = 0;
rest_of_decl_compilation (vtbl, 1, 1);
/* Because we're only doing syntax-checking, we'll never end up
actually marking the variable as written. */
if (flag_syntax_only)
TREE_ASM_WRITTEN (vtbl) = 1;
else if (DECL_ONE_ONLY (vtbl))
{
current = varpool_node::get_create (vtbl);
if (last)
current->add_to_same_comdat_group (last);
last = current;
}
}
/* Since we're writing out the vtable here, also write the debug
info. */
note_debug_info_needed (ctype);
return true;
}
/* A special return value from type_visibility meaning internal
linkage. */
enum { VISIBILITY_ANON = VISIBILITY_INTERNAL+1 };
/* walk_tree helper function for type_visibility. */
static tree
min_vis_r (tree *tp, int *walk_subtrees, void *data)
{
int *vis_p = (int *)data;
if (! TYPE_P (*tp))
{
*walk_subtrees = 0;
}
else if (OVERLOAD_TYPE_P (*tp)
&& !TREE_PUBLIC (TYPE_MAIN_DECL (*tp)))
{
*vis_p = VISIBILITY_ANON;
return *tp;
}
else if (CLASS_TYPE_P (*tp)
&& CLASSTYPE_VISIBILITY (*tp) > *vis_p)
*vis_p = CLASSTYPE_VISIBILITY (*tp);
return NULL;
}
/* Returns the visibility of TYPE, which is the minimum visibility of its
component types. */
static int
type_visibility (tree type)
{
int vis = VISIBILITY_DEFAULT;
cp_walk_tree_without_duplicates (&type, min_vis_r, &vis);
return vis;
}
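/* E.g. instantiating a template with a type that was declared with
   __attribute__ ((visibility ("hidden"))) yields at most hidden
   visibility here, and a component type from an anonymous namespace
   forces VISIBILITY_ANON, i.e. internal linkage. */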
/* Limit the visibility of DECL to VISIBILITY, if not explicitly
specified (or if VISIBILITY is static). If TMPL is true, this
constraint is for a template argument, and takes precedence
over explicitly-specified visibility on the template. */
static void
constrain_visibility (tree decl, int visibility, bool tmpl)
{
if (visibility == VISIBILITY_ANON)
{
/* extern "C" declarations aren't affected by the anonymous
namespace. */
if (!DECL_EXTERN_C_P (decl))
{
TREE_PUBLIC (decl) = 0;
DECL_WEAK (decl) = 0;
DECL_COMMON (decl) = 0;
DECL_COMDAT (decl) = false;
if (VAR_OR_FUNCTION_DECL_P (decl))
{
struct symtab_node *snode = symtab_node::get (decl);
if (snode)
snode->set_comdat_group (NULL);
}
DECL_INTERFACE_KNOWN (decl) = 1;
if (DECL_LANG_SPECIFIC (decl))
DECL_NOT_REALLY_EXTERN (decl) = 1;
}
}
else if (visibility > DECL_VISIBILITY (decl)
&& (tmpl || !DECL_VISIBILITY_SPECIFIED (decl)))
{
DECL_VISIBILITY (decl) = (enum symbol_visibility) visibility;
/* This visibility was not specified. */
DECL_VISIBILITY_SPECIFIED (decl) = false;
}
}
/* Constrain the visibility of DECL based on the visibility of its template
arguments. */
static void
constrain_visibility_for_template (tree decl, tree targs)
{
/* If this is a template instantiation, check the innermost
template args for visibility constraints. The outer template
args are covered by the class check. */
tree args = INNERMOST_TEMPLATE_ARGS (targs);
int i;
for (i = TREE_VEC_LENGTH (args); i > 0; --i)
{
int vis = 0;
tree arg = TREE_VEC_ELT (args, i-1);
if (TYPE_P (arg))
vis = type_visibility (arg);
else
{
if (REFERENCE_REF_P (arg))
arg = TREE_OPERAND (arg, 0);
if (TREE_TYPE (arg))
STRIP_NOPS (arg);
if (TREE_CODE (arg) == ADDR_EXPR)
arg = TREE_OPERAND (arg, 0);
if (VAR_OR_FUNCTION_DECL_P (arg))
{
if (! TREE_PUBLIC (arg))
vis = VISIBILITY_ANON;
else
vis = DECL_VISIBILITY (arg);
}
}
if (vis)
constrain_visibility (decl, vis, true);
}
}
/* Like c_determine_visibility, but with additional C++-specific
behavior.
Function-scope entities can rely on the function's visibility because
it is set in start_preparsed_function.
Class-scope entities cannot rely on the class's visibility until the end
of the enclosing class definition.
Note that because namespaces have multiple independent definitions,
namespace visibility is handled elsewhere using the #pragma visibility
machinery rather than by decorating the namespace declaration.
The goal is for constraints from the type to give a diagnostic, and
other constraints to be applied silently. */
void
determine_visibility (tree decl)
{
/* Remember that all decls get VISIBILITY_DEFAULT when built. */
/* Only relevant for names with external linkage. */
if (!TREE_PUBLIC (decl))
return;
/* Cloned constructors and destructors get the same visibility as
the underlying function. That should be set up in
maybe_clone_body. */
gcc_assert (!DECL_CLONED_FUNCTION_P (decl));
bool orig_visibility_specified = DECL_VISIBILITY_SPECIFIED (decl);
enum symbol_visibility orig_visibility = DECL_VISIBILITY (decl);
/* The decl may be a template instantiation, which could influence
visibility. */
tree template_decl = NULL_TREE;
if (TREE_CODE (decl) == TYPE_DECL)
{
if (CLASS_TYPE_P (TREE_TYPE (decl)))
{
if (CLASSTYPE_USE_TEMPLATE (TREE_TYPE (decl)))
template_decl = decl;
}
else if (TYPE_TEMPLATE_INFO (TREE_TYPE (decl)))
template_decl = decl;
}
else if (DECL_LANG_SPECIFIC (decl) && DECL_USE_TEMPLATE (decl))
template_decl = decl;
/* If DECL is a member of a class, visibility specifiers on the
class can influence the visibility of the DECL. */
tree class_type = NULL_TREE;
if (DECL_CLASS_SCOPE_P (decl))
class_type = DECL_CONTEXT (decl);
else
{
/* Not a class member. */
/* Virtual tables have DECL_CONTEXT set to their associated class,
so they are automatically handled above. */
gcc_assert (!VAR_P (decl)
|| !DECL_VTABLE_OR_VTT_P (decl));
if (DECL_FUNCTION_SCOPE_P (decl) && ! DECL_VISIBILITY_SPECIFIED (decl))
{
/* Local statics and classes get the visibility of their
containing function by default, except that
-fvisibility-inlines-hidden doesn't affect them. */
tree fn = DECL_CONTEXT (decl);
if (DECL_VISIBILITY_SPECIFIED (fn))
{
DECL_VISIBILITY (decl) = DECL_VISIBILITY (fn);
DECL_VISIBILITY_SPECIFIED (decl) =
DECL_VISIBILITY_SPECIFIED (fn);
}
else
{
if (DECL_CLASS_SCOPE_P (fn))
determine_visibility_from_class (decl, DECL_CONTEXT (fn));
else if (determine_hidden_inline (fn))
{
DECL_VISIBILITY (decl) = default_visibility;
DECL_VISIBILITY_SPECIFIED (decl) =
visibility_options.inpragma;
}
else
{
DECL_VISIBILITY (decl) = DECL_VISIBILITY (fn);
DECL_VISIBILITY_SPECIFIED (decl) =
DECL_VISIBILITY_SPECIFIED (fn);
}
}
/* Local classes in templates have CLASSTYPE_USE_TEMPLATE set,
but have no TEMPLATE_INFO, so don't try to check it. */
template_decl = NULL_TREE;
}
else if (VAR_P (decl) && DECL_TINFO_P (decl)
&& flag_visibility_ms_compat)
{
/* Under -fvisibility-ms-compat, types are visible by default,
even though their contents aren't. */
tree underlying_type = TREE_TYPE (DECL_NAME (decl));
int underlying_vis = type_visibility (underlying_type);
if (underlying_vis == VISIBILITY_ANON
|| (CLASS_TYPE_P (underlying_type)
&& CLASSTYPE_VISIBILITY_SPECIFIED (underlying_type)))
constrain_visibility (decl, underlying_vis, false);
else
DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
}
else if (VAR_P (decl) && DECL_TINFO_P (decl))
{
/* tinfo visibility is based on the type it's for. */
constrain_visibility
(decl, type_visibility (TREE_TYPE (DECL_NAME (decl))), false);
/* Give the target a chance to override the visibility associated
with DECL. */
if (TREE_PUBLIC (decl)
&& !DECL_REALLY_EXTERN (decl)
&& CLASS_TYPE_P (TREE_TYPE (DECL_NAME (decl)))
&& !CLASSTYPE_VISIBILITY_SPECIFIED (TREE_TYPE (DECL_NAME (decl))))
targetm.cxx.determine_class_data_visibility (decl);
}
else if (template_decl)
/* Template instantiations and specializations get visibility based
on their template unless they override it with an attribute. */;
else if (! DECL_VISIBILITY_SPECIFIED (decl))
{
if (determine_hidden_inline (decl))
DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
else
{
/* Set default visibility to whatever the user supplied with
#pragma GCC visibility or a namespace visibility attribute. */
DECL_VISIBILITY (decl) = default_visibility;
DECL_VISIBILITY_SPECIFIED (decl) = visibility_options.inpragma;
}
}
}
if (template_decl)
{
/* If the specialization doesn't specify visibility, use the
visibility from the template. */
tree tinfo = get_template_info (template_decl);
tree args = TI_ARGS (tinfo);
tree attribs = (TREE_CODE (decl) == TYPE_DECL
? TYPE_ATTRIBUTES (TREE_TYPE (decl))
: DECL_ATTRIBUTES (decl));
if (args != error_mark_node)
{
tree pattern = DECL_TEMPLATE_RESULT (TI_TEMPLATE (tinfo));
if (!DECL_VISIBILITY_SPECIFIED (decl))
{
if (!DECL_VISIBILITY_SPECIFIED (pattern)
&& determine_hidden_inline (decl))
DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
else
{
DECL_VISIBILITY (decl) = DECL_VISIBILITY (pattern);
DECL_VISIBILITY_SPECIFIED (decl)
= DECL_VISIBILITY_SPECIFIED (pattern);
}
}
if (args
/* Template argument visibility outweighs #pragma or namespace
visibility, but not an explicit attribute. */
&& !lookup_attribute ("visibility", attribs))
{
int depth = TMPL_ARGS_DEPTH (args);
if (DECL_VISIBILITY_SPECIFIED (decl))
{
/* A class template member with explicit visibility
overrides the class visibility, so we need to apply
all the levels of template args directly. */
int i;
for (i = 1; i <= depth; ++i)
{
tree lev = TMPL_ARGS_LEVEL (args, i);
constrain_visibility_for_template (decl, lev);
}
}
else if (PRIMARY_TEMPLATE_P (TI_TEMPLATE (tinfo)))
/* Limit visibility based on its template arguments. */
constrain_visibility_for_template (decl, args);
}
}
}
if (class_type)
determine_visibility_from_class (decl, class_type);
if (decl_anon_ns_mem_p (decl))
/* Names in an anonymous namespace get internal linkage.
This might change once we implement export. */
constrain_visibility (decl, VISIBILITY_ANON, false);
else if (TREE_CODE (decl) != TYPE_DECL)
{
/* Propagate anonymity from type to decl. */
int tvis = type_visibility (TREE_TYPE (decl));
if (tvis == VISIBILITY_ANON
|| ! DECL_VISIBILITY_SPECIFIED (decl))
constrain_visibility (decl, tvis, false);
}
else if (no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/true))
/* DR 757: A type without linkage shall not be used as the type of a
variable or function with linkage, unless
o the variable or function has extern "C" linkage (7.5 [dcl.link]), or
o the variable or function is not used (3.2 [basic.def.odr]) or is
defined in the same translation unit.
Since non-extern "C" decls need to be defined in the same
translation unit, we can make the type internal. */
constrain_visibility (decl, VISIBILITY_ANON, false);
/* If visibility changed and DECL already has DECL_RTL, ensure
symbol flags are updated. */
if ((DECL_VISIBILITY (decl) != orig_visibility
|| DECL_VISIBILITY_SPECIFIED (decl) != orig_visibility_specified)
&& ((VAR_P (decl) && TREE_STATIC (decl))
|| TREE_CODE (decl) == FUNCTION_DECL)
&& DECL_RTL_SET_P (decl))
make_decl_rtl (decl);
}
/* By default, static data members and function members receive
the visibility of their containing class. */
static void
determine_visibility_from_class (tree decl, tree class_type)
{
if (DECL_VISIBILITY_SPECIFIED (decl))
return;
if (determine_hidden_inline (decl))
DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
else
{
/* Default to the class visibility. */
DECL_VISIBILITY (decl) = CLASSTYPE_VISIBILITY (class_type);
DECL_VISIBILITY_SPECIFIED (decl)
= CLASSTYPE_VISIBILITY_SPECIFIED (class_type);
}
/* Give the target a chance to override the visibility associated
with DECL. */
if (VAR_P (decl)
&& (DECL_TINFO_P (decl)
|| (DECL_VTABLE_OR_VTT_P (decl)
/* Construction virtual tables are not exported because
they cannot be referred to from other object files;
their name is not standardized by the ABI. */
&& !DECL_CONSTRUCTION_VTABLE_P (decl)))
&& TREE_PUBLIC (decl)
&& !DECL_REALLY_EXTERN (decl)
&& !CLASSTYPE_VISIBILITY_SPECIFIED (class_type))
targetm.cxx.determine_class_data_visibility (decl);
}
/* Returns true iff DECL is an inline that should get hidden visibility
because of -fvisibility-inlines-hidden. */
static bool
determine_hidden_inline (tree decl)
{
return (visibility_options.inlines_hidden
/* Don't do this for inline templates; specializations might not be
inline, and we don't want them to inherit the hidden
visibility. We'll set it here for all inline instantiations. */
&& !processing_template_decl
&& TREE_CODE (decl) == FUNCTION_DECL
&& DECL_DECLARED_INLINE_P (decl)
&& (! DECL_LANG_SPECIFIC (decl)
|| ! DECL_EXPLICIT_INSTANTIATION (decl)));
}
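/* E.g. with -fvisibility-inlines-hidden, a function like
     inline void f () { }
   receives hidden visibility so that each shared object binds calls to
   its own copy; explicit instantiations are excluded by the check
   above. */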
/* Constrain the visibility of a class TYPE based on the visibility of its
field types. Warn if any fields require lesser visibility. */
void
constrain_class_visibility (tree type)
{
tree binfo;
tree t;
int i;
int vis = type_visibility (type);
if (vis == VISIBILITY_ANON
|| DECL_IN_SYSTEM_HEADER (TYPE_MAIN_DECL (type)))
return;
/* Don't warn about visibility if the class has explicit visibility. */
if (CLASSTYPE_VISIBILITY_SPECIFIED (type))
vis = VISIBILITY_INTERNAL;
for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
if (TREE_CODE (t) == FIELD_DECL && TREE_TYPE (t) != error_mark_node
&& !DECL_ARTIFICIAL (t))
{
tree ftype = strip_pointer_or_array_types (TREE_TYPE (t));
int subvis = type_visibility (ftype);
if (subvis == VISIBILITY_ANON)
{
if (!in_main_input_context())
{
tree nlt = no_linkage_check (ftype, /*relaxed_p=*/false);
if (nlt)
{
if (same_type_p (TREE_TYPE (t), nlt))
warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type has no linkage",
type, t);
else
warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type depends on the type %qT which has no linkage",
type, t, nlt);
}
else
warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type uses the anonymous namespace",
type, t);
}
}
else if (MAYBE_CLASS_TYPE_P (ftype)
&& vis < VISIBILITY_HIDDEN
&& subvis >= VISIBILITY_HIDDEN)
warning (OPT_Wattributes, "\
%qT declared with greater visibility than the type of its field %qD",
type, t);
}
binfo = TYPE_BINFO (type);
for (i = 0; BINFO_BASE_ITERATE (binfo, i, t); ++i)
{
int subvis = type_visibility (TREE_TYPE (t));
if (subvis == VISIBILITY_ANON)
{
if (!in_main_input_context())
{
tree nlt = no_linkage_check (TREE_TYPE (t), /*relaxed_p=*/false);
if (nlt)
{
if (same_type_p (TREE_TYPE (t), nlt))
warning (OPT_Wsubobject_linkage, "\
%qT has a base %qT whose type has no linkage",
type, TREE_TYPE (t));
else
warning (OPT_Wsubobject_linkage, "\
%qT has a base %qT whose type depends on the type %qT which has no linkage",
type, TREE_TYPE (t), nlt);
}
else
warning (OPT_Wsubobject_linkage, "\
%qT has a base %qT whose type uses the anonymous namespace",
type, TREE_TYPE (t));
}
}
else if (vis < VISIBILITY_HIDDEN
&& subvis >= VISIBILITY_HIDDEN)
warning (OPT_Wattributes, "\
%qT declared with greater visibility than its base %qT",
type, TREE_TYPE (t));
}
}
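/* E.g. -Wsubobject-linkage fires for
     namespace { struct A { }; }
     struct B { A a; }; // B has linkage, its field's type does not
   and -Wattributes warns when, say, a default-visibility class has a
   field or base of hidden-visibility class type. */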
/* Functions for adjusting the visibility of a tagged type and its nested
types and declarations when it gets a name for linkage purposes from a
typedef. */
static void bt_reset_linkage_1 (binding_entry, void *);
static void bt_reset_linkage_2 (binding_entry, void *);
/* First reset the visibility of all the types. */
static void
reset_type_linkage_1 (tree type)
{
set_linkage_according_to_type (type, TYPE_MAIN_DECL (type));
if (CLASS_TYPE_P (type))
binding_table_foreach (CLASSTYPE_NESTED_UTDS (type),
bt_reset_linkage_1, NULL);
}
static void
bt_reset_linkage_1 (binding_entry b, void */*data*/)
{
reset_type_linkage_1 (b->type);
}
/* Then reset the visibility of any static data members or member
functions that use those types. */
static void
reset_decl_linkage (tree decl)
{
if (TREE_PUBLIC (decl))
return;
if (DECL_CLONED_FUNCTION_P (decl))
return;
TREE_PUBLIC (decl) = true;
DECL_INTERFACE_KNOWN (decl) = false;
determine_visibility (decl);
tentative_decl_linkage (decl);
}
static void
reset_type_linkage_2 (tree type)
{
if (CLASS_TYPE_P (type))
{
if (tree vt = CLASSTYPE_VTABLES (type))
{
tree name = mangle_vtbl_for_type (type);
DECL_NAME (vt) = name;
SET_DECL_ASSEMBLER_NAME (vt, name);
reset_decl_linkage (vt);
}
if (tree ti = CLASSTYPE_TYPEINFO_VAR (type))
{
tree name = mangle_typeinfo_for_type (type);
DECL_NAME (ti) = name;
SET_DECL_ASSEMBLER_NAME (ti, name);
TREE_TYPE (name) = type;
reset_decl_linkage (ti);
}
for (tree m = TYPE_FIELDS (type); m; m = DECL_CHAIN (m))
{
tree mem = STRIP_TEMPLATE (m);
if (TREE_CODE (mem) == VAR_DECL || TREE_CODE (mem) == FUNCTION_DECL)
reset_decl_linkage (mem);
}
binding_table_foreach (CLASSTYPE_NESTED_UTDS (type),
bt_reset_linkage_2, NULL);
}
}
static void
bt_reset_linkage_2 (binding_entry b, void */*data*/)
{
reset_type_linkage_2 (b->type);
}
void
reset_type_linkage (tree type)
{
reset_type_linkage_1 (type);
reset_type_linkage_2 (type);
}
/* Set up our initial idea of what the linkage of DECL should be. */
void
tentative_decl_linkage (tree decl)
{
if (DECL_INTERFACE_KNOWN (decl))
/* We've already made a decision as to how this function will
be handled. */;
else if (vague_linkage_p (decl))
{
if (TREE_CODE (decl) == FUNCTION_DECL
&& decl_defined_p (decl))
{
DECL_EXTERNAL (decl) = 1;
DECL_NOT_REALLY_EXTERN (decl) = 1;
note_vague_linkage_fn (decl);
/* A non-template inline function with external linkage will
always be COMDAT. As we must eventually determine the
linkage of all functions, and as that causes writes to
the data mapped in from the PCH file, it's advantageous
to mark the functions at this point. */
if (DECL_DECLARED_INLINE_P (decl)
&& (!DECL_IMPLICIT_INSTANTIATION (decl)
|| DECL_DEFAULTED_FN (decl)))
{
/* This function must have external linkage, as
otherwise DECL_INTERFACE_KNOWN would have been
set. */
gcc_assert (TREE_PUBLIC (decl));
comdat_linkage (decl);
DECL_INTERFACE_KNOWN (decl) = 1;
}
}
else if (VAR_P (decl))
maybe_commonize_var (decl);
}
}
/* DECL is a FUNCTION_DECL or VAR_DECL. If the object file linkage
for DECL has not already been determined, do so now by setting
DECL_EXTERNAL, DECL_COMDAT and other related flags. Until this
function is called entities with vague linkage whose definitions
are available must have TREE_PUBLIC set.
If this function decides to place DECL in COMDAT, it will set
appropriate flags -- but will not clear DECL_EXTERNAL. It is up to
the caller to decide whether or not to clear DECL_EXTERNAL. Some
callers defer that decision until it is clear that DECL is actually
required. */
void
import_export_decl (tree decl)
{
int emit_p;
bool comdat_p;
bool import_p;
tree class_type = NULL_TREE;
if (DECL_INTERFACE_KNOWN (decl))
return;
/* We cannot determine what linkage to give to an entity with vague
linkage until the end of the file. For example, a virtual table
for a class will be defined if and only if the key method is
defined in this translation unit. As a further example, consider
that when compiling a translation unit that uses a PCH file with
"-frepo" it would be incorrect to make decisions about what
entities to emit when building the PCH; those decisions must be
delayed until the repository information has been processed. */
gcc_assert (at_eof);
/* Object file linkage for explicit instantiations is handled in
mark_decl_instantiated. For static variables in functions with
vague linkage, maybe_commonize_var is used.
Therefore, the only declarations that should be provided to this
function are those with external linkage that are:
* implicit instantiations of function templates
* inline functions
* implicit instantiations of static data members of class
templates
* virtual tables
* typeinfo objects
Furthermore, all entities that reach this point must have a
definition available in this translation unit.
The following assertions check these conditions. */
gcc_assert (VAR_OR_FUNCTION_DECL_P (decl));
/* Any code that creates entities with TREE_PUBLIC cleared should
also set DECL_INTERFACE_KNOWN. */
gcc_assert (TREE_PUBLIC (decl));
if (TREE_CODE (decl) == FUNCTION_DECL)
gcc_assert (DECL_IMPLICIT_INSTANTIATION (decl)
|| DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (decl)
|| DECL_DECLARED_INLINE_P (decl));
else
gcc_assert (DECL_IMPLICIT_INSTANTIATION (decl)
|| DECL_VTABLE_OR_VTT_P (decl)
|| DECL_TINFO_P (decl));
/* Check that a definition of DECL is available in this translation
unit. */
gcc_assert (!DECL_REALLY_EXTERN (decl));
/* Assume that DECL will not have COMDAT linkage. */
comdat_p = false;
/* Assume that DECL will not be imported into this translation
unit. */
import_p = false;
/* See if the repository tells us whether or not to emit DECL in
this translation unit. */
emit_p = repo_emit_p (decl);
if (emit_p == 0)
import_p = true;
else if (emit_p == 1)
{
/* The repository indicates that this entity should be defined
here. Make sure the back end honors that request. */
mark_needed (decl);
/* Output the definition as an ordinary strong definition. */
DECL_EXTERNAL (decl) = 0;
DECL_INTERFACE_KNOWN (decl) = 1;
return;
}
if (import_p)
/* We have already decided what to do with this DECL; there is no
need to check anything further. */
;
else if (VAR_P (decl) && DECL_VTABLE_OR_VTT_P (decl))
{
class_type = DECL_CONTEXT (decl);
import_export_class (class_type);
if (CLASSTYPE_INTERFACE_KNOWN (class_type)
&& CLASSTYPE_INTERFACE_ONLY (class_type))
import_p = true;
else if ((!flag_weak || TARGET_WEAK_NOT_IN_ARCHIVE_TOC)
&& !CLASSTYPE_USE_TEMPLATE (class_type)
&& CLASSTYPE_KEY_METHOD (class_type)
&& !DECL_DECLARED_INLINE_P (CLASSTYPE_KEY_METHOD (class_type)))
/* The ABI requires that all virtual tables be emitted with
COMDAT linkage. However, on systems where COMDAT symbols
don't show up in the table of contents for a static
archive, or on systems without weak symbols (where we
approximate COMDAT linkage by using internal linkage), the
linker will report errors about undefined symbols because
it will not see the virtual table definition. Therefore,
in the case that we know that the virtual table will be
emitted in only one translation unit, we make the virtual
table an ordinary definition with external linkage. */
DECL_EXTERNAL (decl) = 0;
else if (CLASSTYPE_INTERFACE_KNOWN (class_type))
{
/* CLASS_TYPE is being exported from this translation unit,
so DECL should be defined here. */
if (!flag_weak && CLASSTYPE_EXPLICIT_INSTANTIATION (class_type))
/* If a class is declared in a header with the "extern
template" extension, then it will not be instantiated,
even in translation units that would normally require
it. Often such classes are explicitly instantiated in
one translation unit. Therefore, the explicit
instantiation must be made visible to other translation
units. */
DECL_EXTERNAL (decl) = 0;
else
{
/* The generic C++ ABI says that class data is always
COMDAT, even if there is a key function. Some
variants (e.g., the ARM EABI) say that class data
only has COMDAT linkage if the class data might be
emitted in more than one translation unit. When the
key method can be inline and is inline, we still have
to arrange for comdat even though
class_data_always_comdat is false. */
if (!CLASSTYPE_KEY_METHOD (class_type)
|| DECL_DECLARED_INLINE_P (CLASSTYPE_KEY_METHOD (class_type))
|| targetm.cxx.class_data_always_comdat ())
{
/* The ABI requires COMDAT linkage. Normally, we
only emit COMDAT things when they are needed;
make sure that we realize that this entity is
indeed needed. */
comdat_p = true;
mark_needed (decl);
}
}
}
else if (!flag_implicit_templates
&& CLASSTYPE_IMPLICIT_INSTANTIATION (class_type))
import_p = true;
else
comdat_p = true;
}
else if (VAR_P (decl) && DECL_TINFO_P (decl))
{
tree type = TREE_TYPE (DECL_NAME (decl));
if (CLASS_TYPE_P (type))
{
class_type = type;
import_export_class (type);
if (CLASSTYPE_INTERFACE_KNOWN (type)
&& TYPE_POLYMORPHIC_P (type)
&& CLASSTYPE_INTERFACE_ONLY (type)
/* If -fno-rtti was specified, then we cannot be sure
that RTTI information will be emitted with the
virtual table of the class, so we must emit it
wherever it is used. */
&& flag_rtti)
import_p = true;
else
{
if (CLASSTYPE_INTERFACE_KNOWN (type)
&& !CLASSTYPE_INTERFACE_ONLY (type))
{
comdat_p = (targetm.cxx.class_data_always_comdat ()
|| (CLASSTYPE_KEY_METHOD (type)
&& DECL_DECLARED_INLINE_P (CLASSTYPE_KEY_METHOD (type))));
mark_needed (decl);
if (!flag_weak)
{
comdat_p = false;
DECL_EXTERNAL (decl) = 0;
}
}
else
comdat_p = true;
}
}
else
comdat_p = true;
}
else if (DECL_TEMPLOID_INSTANTIATION (decl))
{
/* DECL is an implicit instantiation of a function or static
data member. */
if ((flag_implicit_templates
&& !flag_use_repository)
|| (flag_implicit_inline_templates
&& TREE_CODE (decl) == FUNCTION_DECL
&& DECL_DECLARED_INLINE_P (decl)))
comdat_p = true;
else
/* If we are not implicitly generating templates, then mark
this entity as undefined in this translation unit. */
import_p = true;
}
else if (DECL_FUNCTION_MEMBER_P (decl))
{
if (!DECL_DECLARED_INLINE_P (decl))
{
tree ctype = DECL_CONTEXT (decl);
import_export_class (ctype);
if (CLASSTYPE_INTERFACE_KNOWN (ctype))
{
DECL_NOT_REALLY_EXTERN (decl)
= ! (CLASSTYPE_INTERFACE_ONLY (ctype)
|| (DECL_DECLARED_INLINE_P (decl)
&& ! flag_implement_inlines
&& !DECL_VINDEX (decl)));
if (!DECL_NOT_REALLY_EXTERN (decl))
DECL_EXTERNAL (decl) = 1;
/* Always make artificials weak. */
if (DECL_ARTIFICIAL (decl) && flag_weak)
comdat_p = true;
else
maybe_make_one_only (decl);
}
}
else
comdat_p = true;
}
else
comdat_p = true;
if (import_p)
{
/* If we are importing DECL into this translation unit, mark it
   as undefined here. */
DECL_EXTERNAL (decl) = 1;
DECL_NOT_REALLY_EXTERN (decl) = 0;
}
else if (comdat_p)
{
/* If we decided to put DECL in COMDAT, mark it accordingly at
this point. */
comdat_linkage (decl);
}
DECL_INTERFACE_KNOWN (decl) = 1;
}
/* Return an expression that performs the destruction of DECL, which
must be a VAR_DECL whose type has a non-trivial destructor, or is
an array whose (innermost) elements have a non-trivial destructor. */
tree
build_cleanup (tree decl)
{
tree clean = cxx_maybe_build_cleanup (decl, tf_warning_or_error);
gcc_assert (clean != NULL_TREE);
return clean;
}
/* Returns the initialization guard variable for the variable DECL,
which has static storage duration. */
tree
get_guard (tree decl)
{
tree sname;
tree guard;
sname = mangle_guard_variable (decl);
guard = get_global_binding (sname);
if (! guard)
{
tree guard_type;
/* We use a type that is big enough to contain a mutex as well
as an integer counter. */
guard_type = targetm.cxx.guard_type ();
guard = build_decl (DECL_SOURCE_LOCATION (decl),
VAR_DECL, sname, guard_type);
/* The guard should have the same linkage as what it guards. */
TREE_PUBLIC (guard) = TREE_PUBLIC (decl);
TREE_STATIC (guard) = TREE_STATIC (decl);
DECL_COMMON (guard) = DECL_COMMON (decl);
DECL_COMDAT (guard) = DECL_COMDAT (decl);
CP_DECL_THREAD_LOCAL_P (guard) = CP_DECL_THREAD_LOCAL_P (decl);
set_decl_tls_model (guard, DECL_TLS_MODEL (decl));
if (DECL_ONE_ONLY (decl))
make_decl_one_only (guard, cxx_comdat_group (guard));
if (TREE_PUBLIC (decl))
DECL_WEAK (guard) = DECL_WEAK (decl);
DECL_VISIBILITY (guard) = DECL_VISIBILITY (decl);
DECL_VISIBILITY_SPECIFIED (guard) = DECL_VISIBILITY_SPECIFIED (decl);
DECL_ARTIFICIAL (guard) = 1;
DECL_IGNORED_P (guard) = 1;
TREE_USED (guard) = 1;
pushdecl_top_level_and_finish (guard, NULL_TREE);
}
return guard;
}
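/* The guard cooperates with code emitted for the local static itself,
   which on Itanium-ABI targets looks roughly like
     if (first byte of guard == 0)
       if (__cxa_guard_acquire (&guard))
         {
           run the initializer;
           __cxa_guard_release (&guard);
         }
   get_guard_cond and set_guard below build the fast-path test and the
   store marking the variable as initialized. */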
/* Return an atomic load of src with the appropriate memory model. */
static tree
build_atomic_load_byte (tree src, HOST_WIDE_INT model)
{
tree ptr_type = build_pointer_type (char_type_node);
tree mem_model = build_int_cst (integer_type_node, model);
tree t, addr, val;
unsigned int size;
int fncode;
size = tree_to_uhwi (TYPE_SIZE_UNIT (char_type_node));
fncode = BUILT_IN_ATOMIC_LOAD_N + exact_log2 (size) + 1;
t = builtin_decl_implicit ((enum built_in_function) fncode);
addr = build1 (ADDR_EXPR, ptr_type, src);
val = build_call_expr (t, 2, addr, mem_model);
return val;
}
/* Return those bits of the GUARD variable that should be set when the
guarded entity is actually initialized. */
static tree
get_guard_bits (tree guard)
{
if (!targetm.cxx.guard_mask_bit ())
{
/* We only set the first byte of the guard, in order to leave room
for a mutex in the high-order bits. */
guard = build1 (ADDR_EXPR,
build_pointer_type (TREE_TYPE (guard)),
guard);
guard = build1 (NOP_EXPR,
build_pointer_type (char_type_node),
guard);
guard = build1 (INDIRECT_REF, char_type_node, guard);
}
return guard;
}
/* Return an expression which determines whether or not the GUARD
variable has already been initialized. */
tree
get_guard_cond (tree guard, bool thread_safe)
{
tree guard_value;
if (!thread_safe)
guard = get_guard_bits (guard);
else
guard = build_atomic_load_byte (guard, MEMMODEL_ACQUIRE);
/* Mask off all but the low bit. */
if (targetm.cxx.guard_mask_bit ())
{
guard_value = integer_one_node;
if (!same_type_p (TREE_TYPE (guard_value), TREE_TYPE (guard)))
guard_value = fold_convert (TREE_TYPE (guard), guard_value);
guard = cp_build_binary_op (input_location,
BIT_AND_EXPR, guard, guard_value,
tf_warning_or_error);
}
guard_value = integer_zero_node;
if (!same_type_p (TREE_TYPE (guard_value), TREE_TYPE (guard)))
guard_value = fold_convert (TREE_TYPE (guard), guard_value);
return cp_build_binary_op (input_location,
EQ_EXPR, guard, guard_value,
tf_warning_or_error);
}
/* Return an expression which sets the GUARD variable, indicating that
the variable being guarded has been initialized. */
tree
set_guard (tree guard)
{
tree guard_init;
/* Set the GUARD to one. */
guard = get_guard_bits (guard);
guard_init = integer_one_node;
if (!same_type_p (TREE_TYPE (guard_init), TREE_TYPE (guard)))
guard_init = fold_convert (TREE_TYPE (guard), guard_init);
return cp_build_modify_expr (input_location, guard, NOP_EXPR, guard_init,
tf_warning_or_error);
}
/* Returns true iff we can tell that VAR does not have a dynamic
initializer. */
static bool
var_defined_without_dynamic_init (tree var)
{
/* If it's defined in another TU, we can't tell. */
if (DECL_EXTERNAL (var))
return false;
/* If it has a non-trivial destructor, registering the destructor
counts as dynamic initialization. */
if (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (TREE_TYPE (var)))
return false;
/* If it's in this TU, its initializer has been processed, unless
   it's a case of self-initialization, in which case DECL_INITIALIZED_P
   is false while the initializer is handled by finish_id_expression. */
if (!DECL_INITIALIZED_P (var))
return false;
/* If it has no initializer or a constant one, it's not dynamic. */
return (!DECL_NONTRIVIALLY_INITIALIZED_P (var)
|| DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (var));
}
/* Returns true iff VAR is a variable that needs uses to be
wrapped for possible dynamic initialization. */
static bool
var_needs_tls_wrapper (tree var)
{
return (!error_operand_p (var)
&& CP_DECL_THREAD_LOCAL_P (var)
&& !DECL_GNU_TLS_P (var)
&& !DECL_FUNCTION_SCOPE_P (var)
&& !var_defined_without_dynamic_init (var));
}
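/* E.g. a C++11 declaration such as
     thread_local std::string s = make ();
   needs wrapping: every use of s goes through a wrapper function (see
   get_tls_wrapper_fn below) that runs the dynamic initializer first if
   need be, whereas a __thread variable with a constant initializer
   does not. */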
/* Get the FUNCTION_DECL for the shared TLS init function for this
translation unit. */
static tree
get_local_tls_init_fn (void)
{
tree sname = get_identifier ("__tls_init");
tree fn = get_global_binding (sname);
if (!fn)
{
fn = build_lang_decl (FUNCTION_DECL, sname,
build_function_type (void_type_node,
void_list_node));
SET_DECL_LANGUAGE (fn, lang_c);
TREE_PUBLIC (fn) = false;
DECL_ARTIFICIAL (fn) = true;
mark_used (fn);
set_global_binding (fn);
}
return fn;
}
/* Get a FUNCTION_DECL for the init function for the thread_local
variable VAR. The init function will be an alias to the function
that initializes all the non-local TLS variables in the translation
unit. The init function is only used by the wrapper function. */
static tree
get_tls_init_fn (tree var)
{
/* Only C++11 TLS vars need this init fn. */
if (!var_needs_tls_wrapper (var))
return NULL_TREE;
/* If -fno-extern-tls-init, assume that we don't need to call
a tls init function for a variable defined in another TU. */
if (!flag_extern_tls_init && DECL_EXTERNAL (var))
return NULL_TREE;
/* If the variable is internal, or if we can't generate aliases,
call the local init function directly. */
if (!TREE_PUBLIC (var) || !TARGET_SUPPORTS_ALIASES)
return get_local_tls_init_fn ();
tree sname = mangle_tls_init_fn (var);
tree fn = get_global_binding (sname);
if (!fn)
{
fn = build_lang_decl (FUNCTION_DECL, sname,
build_function_type (void_type_node,
void_list_node));
SET_DECL_LANGUAGE (fn, lang_c);
TREE_PUBLIC (fn) = TREE_PUBLIC (var);
DECL_ARTIFICIAL (fn) = true;
DECL_COMDAT (fn) = DECL_COMDAT (var);
DECL_EXTERNAL (fn) = DECL_EXTERNAL (var);
if (DECL_ONE_ONLY (var))
make_decl_one_only (fn, cxx_comdat_group (fn));
if (TREE_PUBLIC (var))
{
tree obtype = strip_array_types (non_reference (TREE_TYPE (var)));
/* If the variable is defined somewhere else and might have static
initialization, make the init function a weak reference. */
if ((!TYPE_NEEDS_CONSTRUCTING (obtype)
|| TYPE_HAS_CONSTEXPR_CTOR (obtype)
|| TYPE_HAS_TRIVIAL_DFLT (obtype))
&& TYPE_HAS_TRIVIAL_DESTRUCTOR (obtype)
&& DECL_EXTERNAL (var))
declare_weak (fn);
else
DECL_WEAK (fn) = DECL_WEAK (var);
}
DECL_VISIBILITY (fn) = DECL_VISIBILITY (var);
DECL_VISIBILITY_SPECIFIED (fn) = DECL_VISIBILITY_SPECIFIED (var);
DECL_DLLIMPORT_P (fn) = DECL_DLLIMPORT_P (var);
DECL_IGNORED_P (fn) = 1;
mark_used (fn);
DECL_BEFRIENDING_CLASSES (fn) = var;
set_global_binding (fn);
}
return fn;
}
/* Get a FUNCTION_DECL for the init wrapper function for the thread_local
variable VAR. The wrapper function calls the init function (if any) for
VAR and then returns a reference to VAR. The wrapper function is used
in place of VAR everywhere VAR is mentioned. */
tree
get_tls_wrapper_fn (tree var)
{
/* Only C++11 TLS vars need this wrapper fn. */
if (!var_needs_tls_wrapper (var))
return NULL_TREE;
tree sname = mangle_tls_wrapper_fn (var);
tree fn = get_global_binding (sname);
if (!fn)
{
/* A named rvalue reference is an lvalue, so the wrapper should
always return an lvalue reference. */
tree type = non_reference (TREE_TYPE (var));
type = build_reference_type (type);
tree fntype = build_function_type (type, void_list_node);
fn = build_lang_decl (FUNCTION_DECL, sname, fntype);
SET_DECL_LANGUAGE (fn, lang_c);
TREE_PUBLIC (fn) = TREE_PUBLIC (var);
DECL_ARTIFICIAL (fn) = true;
DECL_IGNORED_P (fn) = 1;
/* The wrapper is inline and emitted everywhere var is used. */
DECL_DECLARED_INLINE_P (fn) = true;
if (TREE_PUBLIC (var))
{
comdat_linkage (fn);
#ifdef HAVE_GAS_HIDDEN
/* Make the wrapper bind locally; there's no reason to share
the wrapper between multiple shared objects. */
DECL_VISIBILITY (fn) = VISIBILITY_INTERNAL;
DECL_VISIBILITY_SPECIFIED (fn) = true;
#endif
}
if (!TREE_PUBLIC (fn))
DECL_INTERFACE_KNOWN (fn) = true;
mark_used (fn);
note_vague_linkage_fn (fn);
#if 0
/* We want CSE to commonize calls to the wrapper, but marking it as
pure is unsafe since it has side-effects. I guess we need a new
ECF flag even weaker than ECF_PURE. FIXME! */
DECL_PURE_P (fn) = true;
#endif
DECL_BEFRIENDING_CLASSES (fn) = var;
set_global_binding (fn);
}
return fn;
}
/* At EOF, generate the definition for the TLS wrapper function FN:
T& var_wrapper() {
if (init_fn) init_fn();
return var;
} */
static void
generate_tls_wrapper (tree fn)
{
tree var = DECL_BEFRIENDING_CLASSES (fn);
start_preparsed_function (fn, NULL_TREE, SF_DEFAULT | SF_PRE_PARSED);
tree body = begin_function_body ();
/* Only call the init fn if there might be one. */
if (tree init_fn = get_tls_init_fn (var))
{
tree if_stmt = NULL_TREE;
/* If init_fn is a weakref, make sure it exists before calling. */
if (lookup_attribute ("weak", DECL_ATTRIBUTES (init_fn)))
{
if_stmt = begin_if_stmt ();
tree addr = cp_build_addr_expr (init_fn, tf_warning_or_error);
tree cond = cp_build_binary_op (DECL_SOURCE_LOCATION (var),
NE_EXPR, addr, nullptr_node,
tf_warning_or_error);
finish_if_stmt_cond (cond, if_stmt);
}
finish_expr_stmt (build_cxx_call
(init_fn, 0, NULL, tf_warning_or_error));
if (if_stmt)
{
finish_then_clause (if_stmt);
finish_if_stmt (if_stmt);
}
}
else
/* If there's no initialization, the wrapper is a constant function. */
TREE_READONLY (fn) = true;
finish_return_stmt (convert_from_reference (var));
finish_function_body (body);
expand_or_defer_fn (finish_function (/*inline_p=*/false));
}
/* Start the process of running a particular set of global constructors
or destructors. Subroutine of do_[cd]tors. Also called from
vtv_start_verification_constructor_init_function. */
static tree
start_objects (int method_type, int initp)
{
tree body;
tree fndecl;
char type[14];
/* Make ctor or dtor function. METHOD_TYPE may be 'I' or 'D'. */
if (initp != DEFAULT_INIT_PRIORITY)
{
char joiner;
#ifdef JOINER
joiner = JOINER;
#else
joiner = '_';
#endif
sprintf (type, "sub_%c%c%.5u", method_type, joiner, initp);
}
else
sprintf (type, "sub_%c", method_type);
fndecl = build_lang_decl (FUNCTION_DECL,
get_file_function_name (type),
build_function_type_list (void_type_node,
NULL_TREE));
start_preparsed_function (fndecl, /*attrs=*/NULL_TREE, SF_PRE_PARSED);
TREE_PUBLIC (current_function_decl) = 0;
/* Mark as artificial because it's not explicitly in the user's
source code. */
DECL_ARTIFICIAL (current_function_decl) = 1;
/* Mark this declaration as used to avoid spurious warnings. */
TREE_USED (current_function_decl) = 1;
/* Mark this function as a global constructor or destructor. */
if (method_type == 'I')
DECL_GLOBAL_CTOR_P (current_function_decl) = 1;
else
DECL_GLOBAL_DTOR_P (current_function_decl) = 1;
body = begin_compound_stmt (BCS_FN_BODY);
return body;
}
/* Finish the process of running a particular set of global constructors
or destructors. Subroutine of do_[cd]tors. */
static void
finish_objects (int method_type, int initp, tree body)
{
tree fn;
/* Finish up. */
finish_compound_stmt (body);
fn = finish_function (/*inline_p=*/false);
if (method_type == 'I')
{
DECL_STATIC_CONSTRUCTOR (fn) = 1;
decl_init_priority_insert (fn, initp);
}
else
{
DECL_STATIC_DESTRUCTOR (fn) = 1;
decl_fini_priority_insert (fn, initp);
}
expand_or_defer_fn (fn);
}
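/* Editorial sketch (not part of GCC): for the default priority,
   start_objects/finish_objects bracket a function whose emitted shape
   is roughly the following; the name follows the conventional
   "_GLOBAL__sub_I_*" style and is shown only for illustration.  */
#if 0
static void
_GLOBAL__sub_I_example (void)    /* registered as a static constructor */
{
  __static_initialization_and_destruction_0 (/*__initialize_p=*/1,
                                             /*__priority=*/65535);
}
#endif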
/* The names of the parameters to the function created to handle
initializations and destructions for objects with static storage
duration. */
#define INITIALIZE_P_IDENTIFIER "__initialize_p"
#define PRIORITY_IDENTIFIER "__priority"
/* The name of the function we create to handle initializations and
destructions for objects with static storage duration. */
#define SSDF_IDENTIFIER "__static_initialization_and_destruction"
/* The declaration for the __INITIALIZE_P argument. */
static GTY(()) tree initialize_p_decl;
/* The declaration for the __PRIORITY argument. */
static GTY(()) tree priority_decl;
/* The declaration for the static storage duration function. */
static GTY(()) tree ssdf_decl;
/* All the static storage duration functions created in this
translation unit. */
static GTY(()) vec<tree, va_gc> *ssdf_decls;
/* A map from priority levels to information about that priority
level. There may be many such levels, so efficient lookup is
important. */
static splay_tree priority_info_map;
/* Begins the generation of the function that will handle all
initialization and destruction of objects with static storage
duration. The function generated takes two parameters of type
`int': __INITIALIZE_P and __PRIORITY. If __INITIALIZE_P is
nonzero, it performs initializations. Otherwise, it performs
destructions. It only performs those initializations or
destructions with the indicated __PRIORITY. The generated function
returns no value.
It is assumed that this function will only be called once per
translation unit. */
static tree
start_static_storage_duration_function (unsigned count)
{
tree type;
tree body;
char id[sizeof (SSDF_IDENTIFIER) + 1 /* '\0' */ + 32];
/* Create the identifier for this function. It will be of the form
SSDF_IDENTIFIER_<number>. */
sprintf (id, "%s_%u", SSDF_IDENTIFIER, count);
type = build_function_type_list (void_type_node,
integer_type_node, integer_type_node,
NULL_TREE);
/* Create the FUNCTION_DECL itself. */
ssdf_decl = build_lang_decl (FUNCTION_DECL,
get_identifier (id),
type);
TREE_PUBLIC (ssdf_decl) = 0;
DECL_ARTIFICIAL (ssdf_decl) = 1;
/* Put this function in the list of functions to be called from the
static constructors and destructors. */
if (!ssdf_decls)
{
vec_alloc (ssdf_decls, 32);
/* Take this opportunity to initialize the map from priority
numbers to information about that priority level. */
priority_info_map = splay_tree_new (splay_tree_compare_ints,
/*delete_key_fn=*/0,
/*delete_value_fn=*/
(splay_tree_delete_value_fn)
(void (*) (void)) free);
/* We always need to generate functions for the
DEFAULT_INIT_PRIORITY so enter it now. That way when we walk
priorities later, we'll be sure to find the
DEFAULT_INIT_PRIORITY. */
get_priority_info (DEFAULT_INIT_PRIORITY);
}
vec_safe_push (ssdf_decls, ssdf_decl);
/* Create the argument list. */
initialize_p_decl = cp_build_parm_decl
(ssdf_decl, get_identifier (INITIALIZE_P_IDENTIFIER), integer_type_node);
TREE_USED (initialize_p_decl) = 1;
priority_decl = cp_build_parm_decl
(ssdf_decl, get_identifier (PRIORITY_IDENTIFIER), integer_type_node);
TREE_USED (priority_decl) = 1;
DECL_CHAIN (initialize_p_decl) = priority_decl;
DECL_ARGUMENTS (ssdf_decl) = initialize_p_decl;
/* Put the function in the global scope. */
pushdecl (ssdf_decl);
/* Start the function itself. This is equivalent to declaring the
function as:
static void __ssdf (int __initialize_p, int __priority_p);
It is static because we only need to call this function from the
various constructor and destructor functions for this module. */
start_preparsed_function (ssdf_decl,
/*attrs=*/NULL_TREE,
SF_PRE_PARSED);
/* Set up the scope of the outermost block in the function. */
body = begin_compound_stmt (BCS_FN_BODY);
return body;
}
/* Finish the generation of the function which performs initialization
and destruction of objects with static storage duration. After
this point, no more such objects can be created. */
static void
finish_static_storage_duration_function (tree body)
{
/* Close out the function. */
finish_compound_stmt (body);
expand_or_defer_fn (finish_function (/*inline_p=*/false));
}
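/* Editorial sketch (not part of GCC): once filled in by
   do_static_initialization_or_destruction, the function above has
   roughly this shape; "a" and "make_a" are hypothetical, and 65535 is
   the usual DEFAULT_INIT_PRIORITY.  */
#if 0
static void
__static_initialization_and_destruction_0 (int __initialize_p,
                                           int __priority)
{
  if (__initialize_p == 1)
    {
      if (__priority == 65535)
        a = make_a ();    /* one initialization at this priority */
    }
}
#endif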
/* Return the information about the indicated PRIORITY level. If no
code to handle this level has yet been generated, generate the
appropriate prologue. */
static priority_info
get_priority_info (int priority)
{
priority_info pi;
splay_tree_node n;
n = splay_tree_lookup (priority_info_map,
(splay_tree_key) priority);
if (!n)
{
/* Create a new priority information structure, and insert it
into the map. */
pi = XNEW (struct priority_info_s);
pi->initializations_p = 0;
pi->destructions_p = 0;
splay_tree_insert (priority_info_map,
(splay_tree_key) priority,
(splay_tree_value) pi);
}
else
pi = (priority_info) n->value;
return pi;
}
/* The effective initialization priority of a DECL. */
#define DECL_EFFECTIVE_INIT_PRIORITY(decl) \
((!DECL_HAS_INIT_PRIORITY_P (decl) || DECL_INIT_PRIORITY (decl) == 0) \
? DEFAULT_INIT_PRIORITY : DECL_INIT_PRIORITY (decl))
/* Whether a DECL needs a guard to protect it against multiple
initialization. */
#define NEEDS_GUARD_P(decl) (TREE_PUBLIC (decl) && (DECL_COMMON (decl) \
|| DECL_ONE_ONLY (decl) \
|| DECL_WEAK (decl)))
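/* Editorial example (not part of GCC): typical declarations for which
   NEEDS_GUARD_P holds are vague-linkage variables that may be emitted,
   and hence dynamically initialized, in several object files.  The use
   of std::string is illustrative only.  */
#if 0
#include <string>
template <class T> struct S { static std::string name; };
template <class T> std::string S<T>::name ("S");  /* DECL_ONE_ONLY */
inline std::string tag ("tag");   /* C++17 inline variable: also comdat */
#endif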
/* Called from one_static_initialization_or_destruction(),
via walk_tree.
Walks the initializer list of a global variable and looks for
temporary variables (DECL_NAME() == NULL and DECL_ARTIFICIAL != 0)
that have their DECL_CONTEXT() == NULL.
For each such temporary variable, set its DECL_CONTEXT() to
the current function. This is necessary because otherwise
some optimizers (enabled by -O2 -fprofile-arcs) might crash
when trying to refer to a temporary variable that does not have
its DECL_CONTEXT() properly set. */
static tree
fix_temporary_vars_context_r (tree *node,
int * /*unused*/,
void * /*unused1*/)
{
gcc_assert (current_function_decl);
if (TREE_CODE (*node) == BIND_EXPR)
{
tree var;
for (var = BIND_EXPR_VARS (*node); var; var = DECL_CHAIN (var))
if (VAR_P (var)
&& !DECL_NAME (var)
&& DECL_ARTIFICIAL (var)
&& !DECL_CONTEXT (var))
DECL_CONTEXT (var) = current_function_decl;
}
return NULL_TREE;
}
/* Set up to handle the initialization or destruction of DECL. If
INITP is nonzero, we are initializing the variable. Otherwise, we
are destroying it. */
static void
one_static_initialization_or_destruction (tree decl, tree init, bool initp)
{
tree guard_if_stmt = NULL_TREE;
tree guard;
/* If we are supposed to destruct and there's a trivial destructor,
nothing has to be done. */
if (!initp
&& TYPE_HAS_TRIVIAL_DESTRUCTOR (TREE_TYPE (decl)))
return;
/* Trick the compiler into thinking we are at the file and line
where DECL was declared so that error-messages make sense, and so
that the debugger will show somewhat sensible file and line
information. */
input_location = DECL_SOURCE_LOCATION (decl);
/* Make sure temporary variables in the initializer all have
their DECL_CONTEXT() set to a value different from NULL_TREE.
This can happen when initializers for global variables are built.
In that case, the DECL_CONTEXT() of the global variables _AND_ of all
the temporary variables that might have been generated in the
accompanying initializers is NULL_TREE, meaning the variables have been
declared in the global namespace.
What we want to do here is to fix that and make sure the DECL_CONTEXT()
of the temporaries is set to the current function decl. */
cp_walk_tree_without_duplicates (&init,
fix_temporary_vars_context_r,
NULL);
/* Because of:
[class.access.spec]
Access control for implicit calls to the constructors,
the conversion functions, or the destructor called to
create and destroy a static data member is performed as
if these calls appeared in the scope of the member's
class.
we pretend we are in a static member function of the class of
which the DECL is a member. */
if (member_p (decl))
{
DECL_CONTEXT (current_function_decl) = DECL_CONTEXT (decl);
DECL_STATIC_FUNCTION_P (current_function_decl) = 1;
}
/* Assume we don't need a guard. */
guard = NULL_TREE;
/* We need a guard if this is an object with external linkage that
might be initialized in more than one place. (For example, a
static data member of a template, when the data member requires
construction.) */
if (NEEDS_GUARD_P (decl))
{
tree guard_cond;
guard = get_guard (decl);
/* When using __cxa_atexit, we just check the GUARD as we would
for a local static. */
if (flag_use_cxa_atexit)
{
/* When using __cxa_atexit, we never try to destroy
anything from a static destructor. */
gcc_assert (initp);
guard_cond = get_guard_cond (guard, false);
}
/* If we don't have __cxa_atexit, then we will be running
destructors from .fini sections, or their equivalents. So,
we need to know how many times we've tried to initialize this
object. We do initializations only if the GUARD is zero,
i.e., if we are the first to initialize the variable. We do
destructions only if the GUARD is one, i.e., if we are the
last to destroy the variable. */
else if (initp)
guard_cond
= cp_build_binary_op (input_location,
EQ_EXPR,
cp_build_unary_op (PREINCREMENT_EXPR,
guard,
/*noconvert=*/true,
tf_warning_or_error),
integer_one_node,
tf_warning_or_error);
else
guard_cond
= cp_build_binary_op (input_location,
EQ_EXPR,
cp_build_unary_op (PREDECREMENT_EXPR,
guard,
/*noconvert=*/true,
tf_warning_or_error),
integer_zero_node,
tf_warning_or_error);
guard_if_stmt = begin_if_stmt ();
finish_if_stmt_cond (guard_cond, guard_if_stmt);
}
/* If we're using __cxa_atexit, we have not already set the GUARD,
so we must do so now. */
if (guard && initp && flag_use_cxa_atexit)
finish_expr_stmt (set_guard (guard));
/* Perform the initialization or destruction. */
if (initp)
{
if (init)
{
finish_expr_stmt (init);
if (sanitize_flags_p (SANITIZE_ADDRESS, decl))
{
varpool_node *vnode = varpool_node::get (decl);
if (vnode)
vnode->dynamically_initialized = 1;
}
}
/* If we're using __cxa_atexit, register a function that calls the
destructor for the object. */
if (flag_use_cxa_atexit)
finish_expr_stmt (register_dtor_fn (decl));
}
else
finish_expr_stmt (build_cleanup (decl));
/* Finish the guard if-stmt, if necessary. */
if (guard)
{
finish_then_clause (guard_if_stmt);
finish_if_stmt (guard_if_stmt);
}
/* Now that we're done with DECL we don't need to pretend to be a
member of its class any longer. */
DECL_CONTEXT (current_function_decl) = NULL_TREE;
DECL_STATIC_FUNCTION_P (current_function_decl) = 0;
}
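/* Editorial sketch (not part of GCC): without __cxa_atexit the guard
   doubles as a counter shared by the init and fini passes of the SSDF,
   roughly as follows ("obj", "make_obj" and "Obj" are hypothetical).  */
#if 0
if (__initialize_p)
  {
    if (++__guard_obj == 1)   /* first initialization attempt wins */
      obj = make_obj ();
  }
else
  {
    if (--__guard_obj == 0)   /* last destruction attempt cleans up */
      obj.~Obj ();
  }
#endif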
/* Generate code to do the initialization or destruction of the decls in VARS,
a TREE_LIST of VAR_DECL with static storage duration.
Whether initialization or destruction is performed is specified by INITP. */
static void
do_static_initialization_or_destruction (tree vars, bool initp)
{
tree node, init_if_stmt, cond;
/* Build the outer if-stmt to check for initialization or destruction. */
init_if_stmt = begin_if_stmt ();
cond = initp ? integer_one_node : integer_zero_node;
cond = cp_build_binary_op (input_location,
EQ_EXPR,
initialize_p_decl,
cond,
tf_warning_or_error);
finish_if_stmt_cond (cond, init_if_stmt);
/* To make sure dynamic construction doesn't access globals from other
compilation units where they might not be yet constructed, for
-fsanitize=address insert __asan_before_dynamic_init call that
prevents access to either all global variables that need construction
in other compilation units, or at least those that haven't been
initialized yet. Variables that need dynamic construction in
the current compilation unit are kept accessible. */
if (initp && (flag_sanitize & SANITIZE_ADDRESS))
finish_expr_stmt (asan_dynamic_init_call (/*after_p=*/false));
node = vars;
do {
tree decl = TREE_VALUE (node);
tree priority_if_stmt;
int priority;
priority_info pi;
/* If we don't need a destructor, there's nothing to do. Avoid
creating a possibly empty if-stmt. */
if (!initp && TYPE_HAS_TRIVIAL_DESTRUCTOR (TREE_TYPE (decl)))
{
node = TREE_CHAIN (node);
continue;
}
/* Remember that we had an initialization or finalization at this
priority. */
priority = DECL_EFFECTIVE_INIT_PRIORITY (decl);
pi = get_priority_info (priority);
if (initp)
pi->initializations_p = 1;
else
pi->destructions_p = 1;
/* Conditionalize this initialization on being in the right priority
and being initializing/finalizing appropriately. */
priority_if_stmt = begin_if_stmt ();
cond = cp_build_binary_op (input_location,
EQ_EXPR,
priority_decl,
build_int_cst (NULL_TREE, priority),
tf_warning_or_error);
finish_if_stmt_cond (cond, priority_if_stmt);
/* Process initializers with same priority. */
for (; node
&& DECL_EFFECTIVE_INIT_PRIORITY (TREE_VALUE (node)) == priority;
node = TREE_CHAIN (node))
/* Do one initialization or destruction. */
one_static_initialization_or_destruction (TREE_VALUE (node),
TREE_PURPOSE (node), initp);
/* Finish up the priority if-stmt body. */
finish_then_clause (priority_if_stmt);
finish_if_stmt (priority_if_stmt);
} while (node);
/* Revert what __asan_before_dynamic_init did by calling
__asan_after_dynamic_init. */
if (initp && (flag_sanitize & SANITIZE_ADDRESS))
finish_expr_stmt (asan_dynamic_init_call (/*after_p=*/true));
/* Finish up the init/destruct if-stmt body. */
finish_then_clause (init_if_stmt);
finish_if_stmt (init_if_stmt);
}
/* VARS is a list of variables with static storage duration which may
need initialization and/or finalization. Remove those variables
that don't really need to be initialized or finalized, and return
the resulting list. The order in which the variables appear in
VARS is in reverse order of the order in which they should actually
be initialized. The list we return is in the unreversed order;
i.e., the first variable should be initialized first. */
static tree
prune_vars_needing_no_initialization (tree *vars)
{
tree *var = vars;
tree result = NULL_TREE;
while (*var)
{
tree t = *var;
tree decl = TREE_VALUE (t);
tree init = TREE_PURPOSE (t);
/* Deal gracefully with error. */
if (error_operand_p (decl))
{
var = &TREE_CHAIN (t);
continue;
}
/* The only things that can be initialized are variables. */
gcc_assert (VAR_P (decl));
/* If this object is not defined, we don't need to do anything
here. */
if (DECL_EXTERNAL (decl))
{
var = &TREE_CHAIN (t);
continue;
}
/* Also, if the initializer already contains errors, we can bail
out now. */
if (init && TREE_CODE (init) == TREE_LIST
&& value_member (error_mark_node, init))
{
var = &TREE_CHAIN (t);
continue;
}
/* This variable is going to need initialization and/or
finalization, so we add it to the list. */
*var = TREE_CHAIN (t);
TREE_CHAIN (t) = result;
result = t;
}
return result;
}
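/* Editorial sketch (not part of GCC): the loop above is the classic
   pointer-to-pointer list filter; kept nodes are unlinked from *VARS
   and prepended to RESULT, which also un-reverses the order.  A
   minimal stand-alone version of the same idiom:  */
#if 0
struct node { struct node *chain; int keep; };
static struct node *
prune (struct node **list)
{
  struct node *result = 0;
  while (*list)
    {
      struct node *t = *list;
      if (!t->keep)
        { list = &t->chain; continue; }  /* skip: advance the cursor */
      *list = t->chain;                  /* unlink T from *LIST */
      t->chain = result;                 /* prepend: reverses the list */
      result = t;
    }
  return result;
}
#endif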
/* Make sure we have told the back end about all the variables in
VARS. */
static void
write_out_vars (tree vars)
{
tree v;
for (v = vars; v; v = TREE_CHAIN (v))
{
tree var = TREE_VALUE (v);
if (!var_finalized_p (var))
{
import_export_decl (var);
rest_of_decl_compilation (var, 1, 1);
}
}
}
/* Generate a static constructor (if CONSTRUCTOR_P) or destructor
(otherwise) that will initialize all global objects with static
storage duration having the indicated PRIORITY. */
static void
generate_ctor_or_dtor_function (bool constructor_p, int priority,
location_t *locus)
{
char function_key;
tree fndecl;
tree body;
size_t i;
input_location = *locus;
/* ??? */
/* Was: locus->line++; */
/* We use `I' to indicate initialization and `D' to indicate
destruction. */
function_key = constructor_p ? 'I' : 'D';
/* We emit the function lazily, to avoid generating empty
global constructors and destructors. */
body = NULL_TREE;
/* For Objective-C++, we may need to initialize metadata found in this module.
This must be done _before_ any other static initializations. */
if (c_dialect_objc () && (priority == DEFAULT_INIT_PRIORITY)
&& constructor_p && objc_static_init_needed_p ())
{
body = start_objects (function_key, priority);
objc_generate_static_init_call (NULL_TREE);
}
/* Call the static storage duration function with appropriate
arguments. */
FOR_EACH_VEC_SAFE_ELT (ssdf_decls, i, fndecl)
{
/* Calls to pure or const functions will expand to nothing. */
if (! (flags_from_decl_or_type (fndecl) & (ECF_CONST | ECF_PURE)))
{
tree call;
if (! body)
body = start_objects (function_key, priority);
call = cp_build_function_call_nary (fndecl, tf_warning_or_error,
build_int_cst (NULL_TREE,
constructor_p),
build_int_cst (NULL_TREE,
priority),
NULL_TREE);
finish_expr_stmt (call);
}
}
/* Close out the function. */
if (body)
finish_objects (function_key, priority, body);
}
/* Generate constructor and destructor functions for the priority
indicated by N. */
static int
generate_ctor_and_dtor_functions_for_priority (splay_tree_node n, void * data)
{
location_t *locus = (location_t *) data;
int priority = (int) n->key;
priority_info pi = (priority_info) n->value;
/* Generate the functions themselves, but only if they are really
needed. */
if (pi->initializations_p)
generate_ctor_or_dtor_function (/*constructor_p=*/true, priority, locus);
if (pi->destructions_p)
generate_ctor_or_dtor_function (/*constructor_p=*/false, priority, locus);
/* Keep iterating. */
return 0;
}
/* Return C++ property of T, based on given operation OP. */
static int
cpp_check (tree t, cpp_operation op)
{
switch (op)
{
case HAS_DEPENDENT_TEMPLATE_ARGS:
{
tree ti = CLASSTYPE_TEMPLATE_INFO (t);
if (!ti)
return 0;
++processing_template_decl;
const bool dep = any_dependent_template_arguments_p (TI_ARGS (ti));
--processing_template_decl;
return dep;
}
case IS_ABSTRACT:
return DECL_PURE_VIRTUAL_P (t);
case IS_CONSTRUCTOR:
return DECL_CONSTRUCTOR_P (t);
case IS_DESTRUCTOR:
return DECL_DESTRUCTOR_P (t);
case IS_COPY_CONSTRUCTOR:
return DECL_COPY_CONSTRUCTOR_P (t);
case IS_MOVE_CONSTRUCTOR:
return DECL_MOVE_CONSTRUCTOR_P (t);
case IS_TEMPLATE:
return TREE_CODE (t) == TEMPLATE_DECL;
case IS_TRIVIAL:
return trivial_type_p (t);
default:
return 0;
}
}
/* Collect source file references recursively, starting from NAMESPC. */
static void
collect_source_refs (tree namespc)
{
/* Iterate over names in this name space. */
for (tree t = NAMESPACE_LEVEL (namespc)->names; t; t = TREE_CHAIN (t))
if (DECL_IS_BUILTIN (t))
;
else if (TREE_CODE (t) == NAMESPACE_DECL && !DECL_NAMESPACE_ALIAS (t))
collect_source_refs (t);
else
collect_source_ref (DECL_SOURCE_FILE (t));
}
/* Collect decls relevant to SOURCE_FILE from all namespaces recursively,
starting from NAMESPC. */
static void
collect_ada_namespace (tree namespc, const char *source_file)
{
tree decl = NAMESPACE_LEVEL (namespc)->names;
/* Collect decls from this namespace. This will skip
NAMESPACE_DECLs (both aliases and regular, it cannot tell). */
collect_ada_nodes (decl, source_file);
/* Now scan for namespace children, and dump them. */
for (; decl; decl = TREE_CHAIN (decl))
if (TREE_CODE (decl) == NAMESPACE_DECL && !DECL_NAMESPACE_ALIAS (decl))
collect_ada_namespace (decl, source_file);
}
/* Returns true iff there is a definition available for variable or
function DECL. */
bool
decl_defined_p (tree decl)
{
if (TREE_CODE (decl) == FUNCTION_DECL)
return (DECL_INITIAL (decl) != NULL_TREE
/* A pending instantiation of a friend temploid is defined. */
|| (DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (decl)
&& DECL_INITIAL (DECL_TEMPLATE_RESULT
(DECL_TI_TEMPLATE (decl)))));
else
{
gcc_assert (VAR_P (decl));
return !DECL_EXTERNAL (decl);
}
}
/* Nonzero for a VAR_DECL whose value can be used in a constant expression.
[expr.const]
An integral constant-expression can only involve ... const
variables of integral or enumeration types initialized with
constant expressions ...
C++0x also allows constexpr variables and temporaries initialized
with constant expressions. We handle the former here, but the latter
are just folded away in cxx_eval_constant_expression.
The standard does not require that the expression be non-volatile.
G++ implements the proposed correction in DR 457. */
bool
decl_constant_var_p (tree decl)
{
if (!decl_maybe_constant_var_p (decl))
return false;
/* We don't know if a template static data member is initialized with
a constant expression until we instantiate its initializer. Even
in the case of a constexpr variable, we can't treat it as a
constant until its initializer is complete in case it's used in
its own initializer. */
maybe_instantiate_decl (decl);
return DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl);
}
/* Returns true if DECL could be a symbolic constant variable, depending on
its initializer. */
bool
decl_maybe_constant_var_p (tree decl)
{
tree type = TREE_TYPE (decl);
if (!VAR_P (decl))
return false;
if (DECL_DECLARED_CONSTEXPR_P (decl))
return true;
if (DECL_HAS_VALUE_EXPR_P (decl))
/* A proxy isn't constant. */
return false;
if (TREE_CODE (type) == REFERENCE_TYPE)
/* References can be constant. */;
else if (CP_TYPE_CONST_NON_VOLATILE_P (type)
&& INTEGRAL_OR_ENUMERATION_TYPE_P (type))
/* And const integers. */;
else
return false;
if (DECL_INITIAL (decl)
&& !DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl))
/* We know the initializer, and it isn't constant. */
return false;
else
return true;
}
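/* Editorial example (not part of GCC): how the predicate classifies a
   few namespace-scope declarations; the expected results are in the
   comments.  */
#if 0
#include <cstdlib>
constexpr int a = 3;         /* true: declared constexpr */
const int b = a + 1;         /* true: const integral, constant init */
const int c = std::rand ();  /* false: initializer known non-constant */
const double d = 3.0;        /* false: not integral, not constexpr */
#endif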
/* Complain that DECL uses a type with no linkage. In C++98 mode this is
called from grokfndecl and grokvardecl; in all modes it is called from
cp_write_global_declarations. */
void
no_linkage_error (tree decl)
{
if (cxx_dialect >= cxx11 && decl_defined_p (decl))
/* In C++11 it's ok if the decl is defined. */
return;
tree t = no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/false);
if (t == NULL_TREE)
/* The type that got us on no_linkage_decls must have gotten a name for
linkage purposes. */;
else if (CLASS_TYPE_P (t) && TYPE_BEING_DEFINED (t))
/* The type might end up having a typedef name for linkage purposes. */
vec_safe_push (no_linkage_decls, decl);
else if (TYPE_UNNAMED_P (t))
{
bool d = false;
if (cxx_dialect >= cxx11)
d = permerror (DECL_SOURCE_LOCATION (decl), "%q#D, declared using "
"unnamed type, is used but never defined", decl);
else if (DECL_EXTERN_C_P (decl))
/* Allow this; it's pretty common in C. */;
else if (VAR_P (decl))
/* DRs 132, 319 and 389 seem to indicate types with
no linkage can only be used to declare extern "C"
entities. Since it's not always an error in the
ISO C++ 90 Standard, we only issue a warning. */
d = warning_at (DECL_SOURCE_LOCATION (decl), 0, "unnamed type "
"with no linkage used to declare variable %q#D with "
"linkage", decl);
else
d = permerror (DECL_SOURCE_LOCATION (decl), "unnamed type with no "
"linkage used to declare function %q#D with linkage",
decl);
if (d && is_typedef_decl (TYPE_NAME (t)))
inform (DECL_SOURCE_LOCATION (TYPE_NAME (t)), "%q#D does not refer "
"to the unqualified type, so it is not used for linkage",
TYPE_NAME (t));
}
else if (cxx_dialect >= cxx11)
{
if (VAR_P (decl) || !DECL_PURE_VIRTUAL_P (decl))
permerror (DECL_SOURCE_LOCATION (decl),
"%q#D, declared using local type "
"%qT, is used but never defined", decl, t);
}
else if (VAR_P (decl))
warning_at (DECL_SOURCE_LOCATION (decl), 0, "type %qT with no linkage "
"used to declare variable %q#D with linkage", t, decl);
else
permerror (DECL_SOURCE_LOCATION (decl), "type %qT with no linkage used "
"to declare function %q#D with linkage", t, decl);
}
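/* Editorial example (not part of GCC): roughly the kind of declaration
   the diagnostics above are about; the exact behavior depends on the
   dialect and on whether the entity is defined.  */
#if 0
struct { int i; } bad;         /* unnamed type, variable with linkage:
                                  warned about in C++98 mode */
typedef struct { int i; } OK;  /* typedef name used for linkage */
OK fine;
#endif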
/* Collect declarations from all namespaces relevant to SOURCE_FILE. */
static void
collect_all_refs (const char *source_file)
{
collect_ada_namespace (global_namespace, source_file);
}
/* Clear DECL_EXTERNAL for NODE. */
static bool
clear_decl_external (struct cgraph_node *node, void * /*data*/)
{
DECL_EXTERNAL (node->decl) = 0;
return false;
}
/* Build up the function to run dynamic initializers for thread_local
variables in this translation unit and alias the init functions for the
individual variables to it. */
static void
handle_tls_init (void)
{
tree vars = prune_vars_needing_no_initialization (&tls_aggregates);
if (vars == NULL_TREE)
return;
location_t loc = DECL_SOURCE_LOCATION (TREE_VALUE (vars));
write_out_vars (vars);
tree guard = build_decl (loc, VAR_DECL, get_identifier ("__tls_guard"),
boolean_type_node);
TREE_PUBLIC (guard) = false;
TREE_STATIC (guard) = true;
DECL_ARTIFICIAL (guard) = true;
DECL_IGNORED_P (guard) = true;
TREE_USED (guard) = true;
CP_DECL_THREAD_LOCAL_P (guard) = true;
set_decl_tls_model (guard, decl_default_tls_model (guard));
pushdecl_top_level_and_finish (guard, NULL_TREE);
tree fn = get_local_tls_init_fn ();
start_preparsed_function (fn, NULL_TREE, SF_PRE_PARSED);
tree body = begin_function_body ();
tree if_stmt = begin_if_stmt ();
tree cond = cp_build_unary_op (TRUTH_NOT_EXPR, guard, false,
tf_warning_or_error);
finish_if_stmt_cond (cond, if_stmt);
finish_expr_stmt (cp_build_modify_expr (loc, guard, NOP_EXPR,
boolean_true_node,
tf_warning_or_error));
for (; vars; vars = TREE_CHAIN (vars))
{
tree var = TREE_VALUE (vars);
tree init = TREE_PURPOSE (vars);
one_static_initialization_or_destruction (var, init, true);
/* Output init aliases even with -fno-extern-tls-init. */
if (TARGET_SUPPORTS_ALIASES && TREE_PUBLIC (var))
{
tree single_init_fn = get_tls_init_fn (var);
if (single_init_fn == NULL_TREE)
continue;
cgraph_node *alias
= cgraph_node::get_create (fn)->create_same_body_alias
(single_init_fn, fn);
gcc_assert (alias != NULL);
}
}
finish_then_clause (if_stmt);
finish_if_stmt (if_stmt);
finish_function_body (body);
expand_or_defer_fn (finish_function (/*inline_p=*/false));
}
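/* Editorial sketch (not part of GCC): handle_tls_init builds a function
   of roughly this shape; "tls_a" and "make_a" are hypothetical, and the
   per-variable init functions are emitted as aliases of it.  */
#if 0
static thread_local bool __tls_guard;
static void
__tls_init (void)
{
  if (!__tls_guard)
    {
      __tls_guard = true;   /* set before the initializers run */
      tls_a = make_a ();    /* one_static_initialization_or_destruction */
    }
}
#endif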
/* We're at the end of compilation, so generate any mangling aliases that
we've been saving up, if DECL is going to be output and ID2 isn't
already taken by another declaration. */
static void
generate_mangling_alias (tree decl, tree id2)
{
struct cgraph_node *n = NULL;
if (TREE_CODE (decl) == FUNCTION_DECL)
{
n = cgraph_node::get (decl);
if (!n)
/* Don't create an alias to an unreferenced function. */
return;
}
tree *slot
= mangled_decls->find_slot_with_hash (id2, IDENTIFIER_HASH_VALUE (id2),
INSERT);
/* If there's a declaration already using this mangled name,
don't create a compatibility alias that conflicts. */
if (*slot)
return;
tree alias = make_alias_for (decl, id2);
*slot = alias;
DECL_IGNORED_P (alias) = 1;
TREE_PUBLIC (alias) = TREE_PUBLIC (decl);
DECL_VISIBILITY (alias) = DECL_VISIBILITY (decl);
if (vague_linkage_p (decl))
DECL_WEAK (alias) = 1;
if (n)
n->create_same_body_alias (alias, decl);
else
varpool_node::create_extra_name_alias (alias, decl);
}
/* Note that we might want to emit an alias with the symbol ID2 for DECL at
the end of translation, for compatibility across bugs in the mangling
implementation. */
void
note_mangling_alias (tree decl, tree id2)
{
if (TARGET_SUPPORTS_ALIASES)
{
if (!defer_mangling_aliases)
generate_mangling_alias (decl, id2);
else
{
vec_safe_push (mangling_aliases, decl);
vec_safe_push (mangling_aliases, id2);
}
}
}
/* Emit all mangling aliases that were deferred up to this point. */
void
generate_mangling_aliases ()
{
while (!vec_safe_is_empty (mangling_aliases))
{
tree id2 = mangling_aliases->pop();
tree decl = mangling_aliases->pop();
generate_mangling_alias (decl, id2);
}
defer_mangling_aliases = false;
}
/* Record a mangling of DECL, whose DECL_ASSEMBLER_NAME has just been
set. NEED_WARNING is true if we must warn about collisions. We do
this to spot changes in mangling that may require compatibility
aliases. */
void
record_mangling (tree decl, bool need_warning)
{
if (!mangled_decls)
mangled_decls = hash_table<mangled_decl_hash>::create_ggc (499);
gcc_checking_assert (DECL_ASSEMBLER_NAME_SET_P (decl));
tree id = DECL_ASSEMBLER_NAME_RAW (decl);
tree *slot
= mangled_decls->find_slot_with_hash (id, IDENTIFIER_HASH_VALUE (id),
INSERT);
/* If this is already an alias, remove the alias, because the real
decl takes precedence. */
if (*slot && DECL_ARTIFICIAL (*slot) && DECL_IGNORED_P (*slot))
if (symtab_node *n = symtab_node::get (*slot))
if (n->cpp_implicit_alias)
{
n->remove ();
*slot = NULL_TREE;
}
if (!*slot)
*slot = decl;
else if (need_warning)
{
error_at (DECL_SOURCE_LOCATION (decl),
"mangling of %q#D as %qE conflicts with a previous mangle",
decl, id);
inform (DECL_SOURCE_LOCATION (*slot),
"previous mangling %q#D", *slot);
inform (DECL_SOURCE_LOCATION (decl),
"a later -fabi-version= (or =0)"
" avoids this error with a change in mangling");
*slot = decl;
}
}
/* The mangled name of DECL is being forcibly changed to NAME. Remove
any existing record that DECL's old mangled name refers to DECL. */
void
overwrite_mangling (tree decl, tree name)
{
if (tree id = DECL_ASSEMBLER_NAME_RAW (decl))
if ((TREE_CODE (decl) == VAR_DECL
|| TREE_CODE (decl) == FUNCTION_DECL)
&& mangled_decls)
if (tree *slot
= mangled_decls->find_slot_with_hash (id, IDENTIFIER_HASH_VALUE (id),
NO_INSERT))
if (*slot == decl)
{
mangled_decls->clear_slot (slot);
/* If this is an alias, remove it from the symbol table. */
if (DECL_ARTIFICIAL (decl) && DECL_IGNORED_P (decl))
if (symtab_node *n = symtab_node::get (decl))
if (n->cpp_implicit_alias)
n->remove ();
}
DECL_ASSEMBLER_NAME_RAW (decl) = name;
}
/* The entire file is now complete. If requested, dump everything
to a file. */
static void
dump_tu (void)
{
dump_flags_t flags;
if (FILE *stream = dump_begin (raw_dump_id, &flags))
{
dump_node (global_namespace, flags & ~TDF_SLIM, stream);
dump_end (raw_dump_id, stream);
}
}
static location_t locus_at_end_of_parsing;
/* Check the deallocation functions for CODE to see if we want to warn that
only one was defined. */
static void
maybe_warn_sized_delete (enum tree_code code)
{
tree sized = NULL_TREE;
tree unsized = NULL_TREE;
for (ovl_iterator iter (get_global_binding (ovl_op_identifier (false, code)));
iter; ++iter)
{
tree fn = *iter;
/* We're only interested in usual deallocation functions. */
if (!usual_deallocation_fn_p (fn))
continue;
if (FUNCTION_ARG_CHAIN (fn) == void_list_node)
unsized = fn;
else
sized = fn;
}
if (DECL_INITIAL (unsized) && !DECL_INITIAL (sized))
warning_at (DECL_SOURCE_LOCATION (unsized), OPT_Wsized_deallocation,
"the program should also define %qD", sized);
else if (!DECL_INITIAL (unsized) && DECL_INITIAL (sized))
warning_at (DECL_SOURCE_LOCATION (sized), OPT_Wsized_deallocation,
"the program should also define %qD", unsized);
}
/* Check the global deallocation functions to see if we want to warn about
defining unsized without sized (or vice versa). */
static void
maybe_warn_sized_delete ()
{
if (!flag_sized_deallocation || !warn_sized_deallocation)
return;
maybe_warn_sized_delete (DELETE_EXPR);
maybe_warn_sized_delete (VEC_DELETE_EXPR);
}
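/* Editorial example (not part of GCC): a program that defines only the
   unsized global operator delete; with -Wsized-deallocation the checks
   above suggest also defining the sized form (and vice versa).  */
#if 0
#include <cstdlib>
void operator delete (void *p) noexcept { std::free (p); }
/* warning: the program should also define
   'void operator delete(void*, std::size_t)'  */
#endif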
/* Earlier we left PTRMEM_CST in variable initializers alone so that we could
look them up when evaluating non-type template parameters. Now we need to
lower them to something the back end can understand. */
static void
lower_var_init ()
{
varpool_node *node;
FOR_EACH_VARIABLE (node)
{
tree d = node->decl;
if (tree init = DECL_INITIAL (d))
DECL_INITIAL (d) = cplus_expand_constant (init);
}
}
/* This routine is called at the end of compilation.
Its job is to create all the code needed to initialize and
destroy the global aggregates. We do the destruction
first, since that way we only need to reverse the decls once. */
void
c_parse_final_cleanups (void)
{
tree vars;
bool reconsider;
size_t i;
unsigned ssdf_count = 0;
int retries = 0;
tree decl;
locus_at_end_of_parsing = input_location;
at_eof = 1;
/* Bad parse errors. Just forget about it. */
if (! global_bindings_p () || current_class_type
|| !vec_safe_is_empty (decl_namespace_list))
return;
/* This is the point to write out a PCH if we're doing that.
In that case we do not want to do anything else. */
if (pch_file)
{
/* Mangle all symbols at PCH creation time. */
symtab_node *node;
FOR_EACH_SYMBOL (node)
if (! is_a <varpool_node *> (node)
|| ! DECL_HARD_REGISTER (node->decl))
DECL_ASSEMBLER_NAME (node->decl);
c_common_write_pch ();
dump_tu ();
/* Ensure even the callers don't try to finalize the CU. */
flag_syntax_only = 1;
return;
}
timevar_stop (TV_PHASE_PARSING);
timevar_start (TV_PHASE_DEFERRED);
symtab->process_same_body_aliases ();
/* Handle -fdump-ada-spec[-slim] */
if (flag_dump_ada_spec || flag_dump_ada_spec_slim)
{
if (flag_dump_ada_spec_slim)
collect_source_ref (main_input_filename);
else
collect_source_refs (global_namespace);
dump_ada_specs (collect_all_refs, cpp_check);
}
/* FIXME - huh? was input_line -= 1;*/
/* We now have to write out all the stuff we put off writing out.
These include:
o Template specializations that we have not yet instantiated,
but which are needed.
o Initialization and destruction for non-local objects with
static storage duration. (Local objects with static storage
duration are initialized when their scope is first entered,
and are cleaned up via atexit.)
o Virtual function tables.
All of these may cause others to be needed. For example,
instantiating one function may cause another to be needed, and
generating the initializer for an object may cause templates to be
instantiated, etc., etc. */
emit_support_tinfos ();
do
{
tree t;
tree decl;
reconsider = false;
/* If there are templates that we've put off instantiating, do
them now. */
instantiate_pending_templates (retries);
ggc_collect ();
/* Write out virtual tables as required. Writing out the
virtual table for a template class may cause the
instantiation of members of that class. If we write out
vtables then we remove the class from our list so we don't
have to look at it again. */
for (i = keyed_classes->length ();
keyed_classes->iterate (--i, &t);)
if (maybe_emit_vtables (t))
{
reconsider = true;
keyed_classes->unordered_remove (i);
}
/* The input_location may have been changed during marking of
vtable entries. */
input_location = locus_at_end_of_parsing;
/* Write out needed type info variables. We have to be careful
looping through unemitted decls, because emit_tinfo_decl may
cause other variables to be needed. New elements will be
appended, and we remove from the vector those that actually
get emitted. */
for (i = unemitted_tinfo_decls->length ();
unemitted_tinfo_decls->iterate (--i, &t);)
if (emit_tinfo_decl (t))
{
reconsider = true;
unemitted_tinfo_decls->unordered_remove (i);
}
/* The list of objects with static storage duration is built up
in reverse order. We clear STATIC_AGGREGATES so that any new
aggregates added during the initialization of these will be
initialized in the correct order when we next come around the
loop. */
vars = prune_vars_needing_no_initialization (&static_aggregates);
if (vars)
{
/* We need to start a new initialization function each time
through the loop. That's because we need to know which
vtables have been referenced, and TREE_SYMBOL_REFERENCED
isn't computed until a function is finished, and written
out. That's a deficiency in the back end. When this is
fixed, these initialization functions could all become
inline, with resulting performance improvements. */
tree ssdf_body;
/* Set the line and file, so that it is obviously not from
the source file. */
input_location = locus_at_end_of_parsing;
ssdf_body = start_static_storage_duration_function (ssdf_count);
/* Make sure the back end knows about all the variables. */
write_out_vars (vars);
/* First generate code to do all the initializations. */
if (vars)
do_static_initialization_or_destruction (vars, /*initp=*/true);
/* Then, generate code to do all the destructions. Do these
in reverse order so that the most recently constructed
variable is the first destroyed. If we're using
__cxa_atexit, then we don't need to do this; functions
were registered at initialization time to destroy the
local statics. */
if (!flag_use_cxa_atexit && vars)
{
vars = nreverse (vars);
do_static_initialization_or_destruction (vars, /*initp=*/false);
}
else
vars = NULL_TREE;
/* Finish up the static storage duration function for this
round. */
input_location = locus_at_end_of_parsing;
finish_static_storage_duration_function (ssdf_body);
/* All those initializations and finalizations might cause
us to need more inline functions, more template
instantiations, etc. */
reconsider = true;
ssdf_count++;
/* ??? was: locus_at_end_of_parsing.line++; */
}
/* Now do the same for thread_local variables. */
handle_tls_init ();
/* Go through the set of inline functions whose bodies have not
been emitted yet. If out-of-line copies of these functions
are required, emit them. */
FOR_EACH_VEC_SAFE_ELT (deferred_fns, i, decl)
{
/* Does it need synthesizing? */
if (DECL_DEFAULTED_FN (decl) && ! DECL_INITIAL (decl)
&& (! DECL_REALLY_EXTERN (decl) || possibly_inlined_p (decl)))
{
/* Even though we're already at the top-level, we push
there again. That way, when we pop back a few lines
hence, all of our state is restored. Otherwise,
finish_function doesn't clean things up, and we end
up with CURRENT_FUNCTION_DECL set. */
push_to_top_level ();
/* The decl's location will mark where it was first
needed. Save that so synthesize_method can indicate
where it was needed from, in case of error. */
input_location = DECL_SOURCE_LOCATION (decl);
synthesize_method (decl);
pop_from_top_level ();
reconsider = true;
}
if (!DECL_INITIAL (decl) && decl_tls_wrapper_p (decl))
generate_tls_wrapper (decl);
if (!DECL_SAVED_TREE (decl))
continue;
cgraph_node *node = cgraph_node::get_create (decl);
/* We lie to the back end, pretending that some functions
are not defined when they really are. This keeps these
functions from being put out unnecessarily. But, we must
stop lying when the functions are referenced, or if they
are not comdat since they need to be put out now. If
DECL_INTERFACE_KNOWN, then we have already set
DECL_EXTERNAL appropriately, so there's no need to check
again, and we do not want to clear DECL_EXTERNAL if a
previous call to import_export_decl set it.
This is done in a separate for cycle, because if some
deferred function is contained in another deferred
function later in deferred_fns varray,
rest_of_compilation would skip this function and we
really cannot expand the same function twice. */
import_export_decl (decl);
if (DECL_NOT_REALLY_EXTERN (decl)
&& DECL_INITIAL (decl)
&& decl_needed_p (decl))
{
if (node->cpp_implicit_alias)
node = node->get_alias_target ();
node->call_for_symbol_thunks_and_aliases (clear_decl_external,
NULL, true);
/* If we mark !DECL_EXTERNAL one of the symbols in some comdat
group, we need to mark all symbols in the same comdat group
that way. */
if (node->same_comdat_group)
for (cgraph_node *next
= dyn_cast<cgraph_node *> (node->same_comdat_group);
next != node;
next = dyn_cast<cgraph_node *> (next->same_comdat_group))
next->call_for_symbol_thunks_and_aliases (clear_decl_external,
NULL, true);
}
/* If we're going to need to write this function out, and
there's already a body for it, create RTL for it now.
(There might be no body if this is a method we haven't
gotten around to synthesizing yet.) */
if (!DECL_EXTERNAL (decl)
&& decl_needed_p (decl)
&& !TREE_ASM_WRITTEN (decl)
&& !node->definition)
{
/* We will output the function; no longer consider it in this
loop. */
DECL_DEFER_OUTPUT (decl) = 0;
/* Generate RTL for this function now that we know we
need it. */
expand_or_defer_fn (decl);
/* If we're compiling -fsyntax-only pretend that this
function has been written out so that we don't try to
expand it again. */
if (flag_syntax_only)
TREE_ASM_WRITTEN (decl) = 1;
reconsider = true;
}
}
if (wrapup_namespace_globals ())
reconsider = true;
/* Static data members are just like namespace-scope globals. */
FOR_EACH_VEC_SAFE_ELT (pending_statics, i, decl)
{
if (var_finalized_p (decl) || DECL_REALLY_EXTERN (decl)
/* Don't write it out if we haven't seen a definition. */
|| (DECL_IN_AGGR_P (decl) && !DECL_INLINE_VAR_P (decl))
/* Or haven't instantiated it. */
|| (DECL_TEMPLATE_INSTANTIATION (decl)
&& !DECL_TEMPLATE_INSTANTIATED (decl)))
continue;
import_export_decl (decl);
/* If this static data member is needed, provide it to the
back end. */
if (DECL_NOT_REALLY_EXTERN (decl) && decl_needed_p (decl))
DECL_EXTERNAL (decl) = 0;
}
if (vec_safe_length (pending_statics) != 0
&& wrapup_global_declarations (pending_statics->address (),
pending_statics->length ()))
reconsider = true;
retries++;
}
while (reconsider);
lower_var_init ();
generate_mangling_aliases ();
/* All used inline functions must have a definition at this point. */
FOR_EACH_VEC_SAFE_ELT (deferred_fns, i, decl)
{
if (/* Check only inline functions that were actually used. */
DECL_ODR_USED (decl) && DECL_DECLARED_INLINE_P (decl)
/* If the definition actually was available here, then the
fact that the function was not defined merely represents
that for some reason (use of a template repository,
#pragma interface, etc.) we decided not to emit the
definition here. */
&& !DECL_INITIAL (decl)
/* Don't complain if the template was defined. */
&& !(DECL_TEMPLATE_INSTANTIATION (decl)
&& DECL_INITIAL (DECL_TEMPLATE_RESULT
(template_for_substitution (decl)))))
{
warning_at (DECL_SOURCE_LOCATION (decl), 0,
"inline function %qD used but never defined", decl);
/* Avoid a duplicate warning from check_global_declaration. */
TREE_NO_WARNING (decl) = 1;
}
}
/* So must decls that use a type with no linkage. */
FOR_EACH_VEC_SAFE_ELT (no_linkage_decls, i, decl)
no_linkage_error (decl);
maybe_warn_sized_delete ();
/* Then, do the Objective-C stuff. This is where all the
Objective-C module stuff gets generated (symtab,
class/protocol/selector lists etc). This must be done after C++
templates, destructors etc. so that selectors used in C++
templates are properly allocated. */
if (c_dialect_objc ())
objc_write_global_declarations ();
/* We give C linkage to static constructors and destructors. */
push_lang_context (lang_name_c);
/* Generate initialization and destruction functions for all
priorities for which they are required. */
if (priority_info_map)
splay_tree_foreach (priority_info_map,
generate_ctor_and_dtor_functions_for_priority,
/*data=*/&locus_at_end_of_parsing);
else if (c_dialect_objc () && objc_static_init_needed_p ())
/* If this is obj-c++ and we need a static init, call
generate_ctor_or_dtor_function. */
generate_ctor_or_dtor_function (/*constructor_p=*/true,
DEFAULT_INIT_PRIORITY,
&locus_at_end_of_parsing);
/* We're done with the splay-tree now. */
if (priority_info_map)
splay_tree_delete (priority_info_map);
/* Generate any missing aliases. */
maybe_apply_pending_pragma_weaks ();
/* We're done with static constructors, so we can go back to "C++"
linkage now. */
pop_lang_context ();
if (flag_vtable_verify)
{
vtv_recover_class_info ();
vtv_compute_class_hierarchy_transitive_closure ();
vtv_build_vtable_verify_fndecl ();
}
perform_deferred_noexcept_checks ();
finish_repo ();
fini_constexpr ();
/* The entire file is now complete. If requested, dump everything
to a file. */
dump_tu ();
if (flag_detailed_statistics)
{
dump_tree_statistics ();
dump_time_statistics ();
}
timevar_stop (TV_PHASE_DEFERRED);
timevar_start (TV_PHASE_PARSING);
/* Indicate that we're done with front end processing. */
at_eof = 2;
}
/* Perform any post compilation-proper cleanups for the C++ front-end.
This should really go away. No front-end should need to do
anything past the compilation process. */
void
cxx_post_compilation_parsing_cleanups (void)
{
timevar_start (TV_PHASE_LATE_PARSING_CLEANUPS);
if (flag_vtable_verify)
{
/* Generate the special constructor initialization function that
calls __VLTRegisterPairs, and give it a very high
initialization priority. This must be done after
finalize_compilation_unit so that we have accurate
information about which vtable will actually be emitted. */
vtv_generate_init_routine ();
}
input_location = locus_at_end_of_parsing;
if (flag_checking)
validate_conversion_obstack ();
timevar_stop (TV_PHASE_LATE_PARSING_CLEANUPS);
}
/* FN is an OFFSET_REF, DOTSTAR_EXPR or MEMBER_REF indicating the
function to call in parse-tree form; it has not yet been
semantically analyzed. ARGS are the arguments to the function.
They have already been semantically analyzed. This may change
ARGS. */
tree
build_offset_ref_call_from_tree (tree fn, vec<tree, va_gc> **args,
tsubst_flags_t complain)
{
tree orig_fn;
vec<tree, va_gc> *orig_args = NULL;
tree expr;
tree object;
orig_fn = fn;
object = TREE_OPERAND (fn, 0);
if (processing_template_decl)
{
gcc_assert (TREE_CODE (fn) == DOTSTAR_EXPR
|| TREE_CODE (fn) == MEMBER_REF);
if (type_dependent_expression_p (fn)
|| any_type_dependent_arguments_p (*args))
return build_min_nt_call_vec (fn, *args);
orig_args = make_tree_vector_copy (*args);
/* Transform the arguments and add the implicit "this"
parameter. That must be done before the FN is transformed
because we depend on the form of FN. */
make_args_non_dependent (*args);
object = build_non_dependent_expr (object);
if (TREE_CODE (TREE_TYPE (fn)) == METHOD_TYPE)
{
if (TREE_CODE (fn) == DOTSTAR_EXPR)
object = cp_build_addr_expr (object, complain);
vec_safe_insert (*args, 0, object);
}
/* Now that the arguments are done, transform FN. */
fn = build_non_dependent_expr (fn);
}
/* A qualified name corresponding to a bound pointer-to-member is
represented as an OFFSET_REF:
struct B { void g(); };
void (B::*p)();
void B::g() { (this->*p)(); } */
if (TREE_CODE (fn) == OFFSET_REF)
{
tree object_addr = cp_build_addr_expr (object, complain);
fn = TREE_OPERAND (fn, 1);
fn = get_member_function_from_ptrfunc (&object_addr, fn,
complain);
vec_safe_insert (*args, 0, object_addr);
}
if (CLASS_TYPE_P (TREE_TYPE (fn)))
expr = build_op_call (fn, args, complain);
else
expr = cp_build_function_call_vec (fn, args, complain);
if (processing_template_decl && expr != error_mark_node)
expr = build_min_non_dep_call_vec (expr, orig_fn, orig_args);
if (orig_args != NULL)
release_tree_vector (orig_args);
return expr;
}
void
check_default_args (tree x)
{
tree arg = TYPE_ARG_TYPES (TREE_TYPE (x));
bool saw_def = false;
int i = 0 - (TREE_CODE (TREE_TYPE (x)) == METHOD_TYPE);
for (; arg && arg != void_list_node; arg = TREE_CHAIN (arg), ++i)
{
if (TREE_PURPOSE (arg))
saw_def = true;
else if (saw_def && !PACK_EXPANSION_P (TREE_VALUE (arg)))
{
error ("default argument missing for parameter %P of %q+#D", i, x);
TREE_PURPOSE (arg) = error_mark_node;
}
}
}
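/* Editorial example (not part of GCC): what check_default_args rejects
   and accepts.  */
#if 0
void f (int a = 1, int b);      /* error: default argument missing
                                   for this parameter */
void g (int a = 1, int b = 2);  /* OK: no gap in the defaults */
#endif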
/* Return true if function DECL can be inlined. This is used to force
instantiation of methods that might be interesting for inlining. */
bool
possibly_inlined_p (tree decl)
{
gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
if (DECL_UNINLINABLE (decl))
return false;
if (!optimize)
return DECL_DECLARED_INLINE_P (decl);
/* When optimizing, we might inline everything when flatten
attribute or heuristics inlining for size or autoinlining
is used. */
return true;
}
/* Normally, we can wait until instantiation-time to synthesize DECL.
However, if DECL is a static data member initialized with a constant
or a constexpr function, we need it right now because a reference to
such a data member or a call to such function is not value-dependent.
For a function that uses auto in the return type, we need to instantiate
it to find out its type. For OpenMP user defined reductions, we need
them instantiated for reduction clauses which inline them by hand
directly. */
static void
maybe_instantiate_decl (tree decl)
{
if (DECL_LANG_SPECIFIC (decl)
&& DECL_TEMPLATE_INFO (decl)
&& (decl_maybe_constant_var_p (decl)
|| (TREE_CODE (decl) == FUNCTION_DECL
&& DECL_OMP_DECLARE_REDUCTION_P (decl))
|| undeduced_auto_decl (decl))
&& !DECL_DECLARED_CONCEPT_P (decl)
&& !uses_template_parms (DECL_TI_ARGS (decl)))
{
/* Instantiating a function will result in garbage collection. We
must treat this situation as if we were within the body of a
function so as to avoid collecting live data only referenced from
the stack (such as overload resolution candidates). */
++function_depth;
instantiate_decl (decl, /*defer_ok=*/false,
/*expl_inst_class_mem_p=*/false);
--function_depth;
}
}
/* Mark DECL (either a _DECL or a BASELINK) as "used" in the program.
If DECL is a specialization or implicitly declared class member,
generate the actual definition. Return false if something goes
wrong, true otherwise. */
bool
mark_used (tree decl, tsubst_flags_t complain)
{
/* If we're just testing conversions or resolving overloads, we
don't want any permanent effects like forcing functions to be
output or instantiating templates. */
if ((complain & tf_conv))
return true;
/* If DECL is a BASELINK for a single function, then treat it just
like the DECL for the function. Otherwise, if the BASELINK is
for an overloaded function, we don't know which function was
actually used until after overload resolution. */
if (BASELINK_P (decl))
{
decl = BASELINK_FUNCTIONS (decl);
if (really_overloaded_fn (decl))
return true;
decl = OVL_FIRST (decl);
}
/* Set TREE_USED for the benefit of -Wunused. */
TREE_USED (decl) = 1;
/* And for structured bindings also the underlying decl. */
if (DECL_DECOMPOSITION_P (decl) && DECL_DECOMP_BASE (decl))
TREE_USED (DECL_DECOMP_BASE (decl)) = 1;
if (TREE_CODE (decl) == TEMPLATE_DECL)
return true;
if (DECL_CLONED_FUNCTION_P (decl))
TREE_USED (DECL_CLONED_FUNCTION (decl)) = 1;
/* Mark enumeration types as used. */
if (TREE_CODE (decl) == CONST_DECL)
used_types_insert (DECL_CONTEXT (decl));
if (TREE_CODE (decl) == FUNCTION_DECL
&& !maybe_instantiate_noexcept (decl, complain))
return false;
if (TREE_CODE (decl) == FUNCTION_DECL
&& DECL_DELETED_FN (decl))
{
if (DECL_ARTIFICIAL (decl)
&& DECL_CONV_FN_P (decl)
&& LAMBDA_TYPE_P (DECL_CONTEXT (decl)))
/* We mark a lambda conversion op as deleted if we can't
generate it properly; see maybe_add_lambda_conv_op. */
sorry ("converting lambda that uses %<...%> to function pointer");
else if (complain & tf_error)
{
error ("use of deleted function %qD", decl);
if (!maybe_explain_implicit_delete (decl))
inform (DECL_SOURCE_LOCATION (decl), "declared here");
}
return false;
}
if (TREE_DEPRECATED (decl) && (complain & tf_warning)
&& deprecated_state != DEPRECATED_SUPPRESS)
warn_deprecated_use (decl, NULL_TREE);
/* We can only check DECL_ODR_USED on variables or functions with
DECL_LANG_SPECIFIC set, and these are also the only decls that we
might need special handling for. */
if (!VAR_OR_FUNCTION_DECL_P (decl)
|| DECL_LANG_SPECIFIC (decl) == NULL
|| DECL_THUNK_P (decl))
{
if (!processing_template_decl
&& !require_deduced_type (decl, complain))
return false;
return true;
}
/* We only want to do this processing once. We don't need to keep trying
to instantiate inline templates, because unit-at-a-time will make sure
we get them compiled before functions that want to inline them. */
if (DECL_ODR_USED (decl))
return true;
/* Normally, we can wait until instantiation-time to synthesize DECL.
However, if DECL is a static data member initialized with a constant
or a constexpr function, we need it right now because a reference to
such a data member or a call to such function is not value-dependent.
For a function that uses auto in the return type, we need to instantiate
it to find out its type. For OpenMP user defined reductions, we need
them instantiated for reduction clauses which inline them by hand
directly. */
maybe_instantiate_decl (decl);
if (processing_template_decl || in_template_function ())
return true;
/* Check this too in case we're within instantiate_non_dependent_expr. */
if (DECL_TEMPLATE_INFO (decl)
&& uses_template_parms (DECL_TI_ARGS (decl)))
return true;
if (!require_deduced_type (decl, complain))
return false;
if (builtin_pack_fn_p (decl))
{
error ("use of built-in parameter pack %qD outside of a template",
DECL_NAME (decl));
return false;
}
/* If we don't need a value, then we don't need to synthesize DECL. */
if (cp_unevaluated_operand || in_discarded_stmt)
return true;
DECL_ODR_USED (decl) = 1;
if (DECL_CLONED_FUNCTION_P (decl))
DECL_ODR_USED (DECL_CLONED_FUNCTION (decl)) = 1;
/* DR 757: A type without linkage shall not be used as the type of a
variable or function with linkage, unless
o the variable or function has extern "C" linkage (7.5 [dcl.link]), or
o the variable or function is not used (3.2 [basic.def.odr]) or is
defined in the same translation unit. */
if (cxx_dialect > cxx98
&& decl_linkage (decl) != lk_none
&& !DECL_EXTERN_C_P (decl)
&& !DECL_ARTIFICIAL (decl)
&& !decl_defined_p (decl)
&& no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/false))
{
if (is_local_extern (decl))
/* There's no way to define a local extern, and adding it to
the vector interferes with GC, so give an error now. */
no_linkage_error (decl);
else
vec_safe_push (no_linkage_decls, decl);
}
if (TREE_CODE (decl) == FUNCTION_DECL && DECL_DECLARED_INLINE_P (decl)
&& !DECL_INITIAL (decl) && !DECL_ARTIFICIAL (decl))
/* Remember it, so we can check it was defined. */
note_vague_linkage_fn (decl);
/* Is it a synthesized method that needs to be synthesized? */
if (TREE_CODE (decl) == FUNCTION_DECL
&& DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)
&& DECL_DEFAULTED_FN (decl)
/* A function defaulted outside the class is synthesized either by
cp_finish_decl or instantiate_decl. */
&& !DECL_DEFAULTED_OUTSIDE_CLASS_P (decl)
&& ! DECL_INITIAL (decl))
{
/* Defer virtual destructors so that thunks get the right
linkage. */
if (DECL_VIRTUAL_P (decl) && !at_eof)
{
note_vague_linkage_fn (decl);
return true;
}
/* Remember the current location for a function we will end up
synthesizing. Then we can inform the user where it was
required in the case of error. */
DECL_SOURCE_LOCATION (decl) = input_location;
/* Synthesizing an implicitly defined member function will result in
garbage collection. We must treat this situation as if we were
within the body of a function so as to avoid collecting live data
on the stack (such as overload resolution candidates).
We could just let cp_write_global_declarations handle synthesizing
this function by adding it to deferred_fns, but doing
it at the use site produces better error messages. */
++function_depth;
synthesize_method (decl);
--function_depth;
/* If this is a synthesized method we don't need to
do the instantiation test below. */
}
else if (VAR_OR_FUNCTION_DECL_P (decl)
&& DECL_TEMPLATE_INFO (decl)
&& !DECL_DECLARED_CONCEPT_P (decl)
&& (!DECL_EXPLICIT_INSTANTIATION (decl)
|| always_instantiate_p (decl)))
/* If this is a function or variable that is an instance of some
template, we now know that we will need to actually do the
instantiation. We check that DECL is not an explicit
instantiation because that is not checked in instantiate_decl.
We put off instantiating functions in order to improve compile
times. Maintaining a stack of active functions is expensive,
and the inliner knows to instantiate any functions it might
need. Therefore, we always try to defer instantiation. */
{
++function_depth;
instantiate_decl (decl, /*defer_ok=*/true,
/*expl_inst_class_mem_p=*/false);
--function_depth;
}
return true;
}
bool
mark_used (tree decl)
{
return mark_used (decl, tf_warning_or_error);
}
tree
vtv_start_verification_constructor_init_function (void)
{
return start_objects ('I', MAX_RESERVED_INIT_PRIORITY - 1);
}
tree
vtv_finish_verification_constructor_init_function (tree function_body)
{
tree fn;
finish_compound_stmt (function_body);
fn = finish_function (/*inline_p=*/false);
DECL_STATIC_CONSTRUCTOR (fn) = 1;
decl_init_priority_insert (fn, MAX_RESERVED_INIT_PRIORITY - 1);
return fn;
}
#include "gt-cp-decl2.h"
|
engalmod.c | /* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@file engalmod.c
@brief c-routines for function calls from Fortran in an enhanced
galmod environment
This module contains two functions that provide a very specialised
chisquare evaluation when comparing an observed cube with a cube
that is passed by galmod of the GIPSY environment. The c functions
are meant for a hack into galmod and therefore have, as such, no
standalone use. There is one initialiser routine that sets up the
functionality, i.e., allocates memory where needed and saves
variables that don't change when a lot of models are passed to the
second routine, the chisquare evaluation routine. The latter does nothing
but give back a chisquare when comparing the original cube and the
model. Both are passed as float arrays only. The module will be
compiled as a library, so that only the library libengalmod.a will
be needed besides this include file.
This is the first stable version.
@todo A lot of optimisation: As the array size doesn't change,
calculate the fftw_plans ONCE exhaustively and save them to global
variables. Make an array of the parts of the gaussian beam that are
redundant (in the hope that it makes things faster). It seems that
a division is a long process: think about it. Test whether
powf(x,2) is faster than x*x (should be...).
$Source: /Volumes/DATA_J_II/data/CVS/tirific/src/engalmod.c,v $
$Date: 2011/05/25 22:25:26 $
$Revision: 1.44 $
$Author: jozsa $
$Log: engalmod.c,v $
Revision 1.44 2011/05/25 22:25:26 jozsa
Left work
Revision 1.43 2011/05/11 13:37:12 jozsa
Left work
Revision 1.42 2011/05/10 00:30:15 jozsa
Left work
Revision 1.41 2009/05/26 07:56:40 jozsa
Left work
Revision 1.40 2007/08/22 15:58:40 gjozsa
Left work
Revision 1.39 2006/04/07 11:13:32 gjozsa
simple BUGFIX
Revision 1.38 2006/04/06 10:39:09 gjozsa
Included function engalmod_chflgs
Revision 1.37 2006/04/03 11:47:46 gjozsa
included masking, fixed a mask to be present if pixval < -1024
Revision 1.36 2005/04/20 13:26:24 gjozsa
Left work
Revision 1.35 2005/04/12 15:52:14 gjozsa
Left work
Revision 1.34 2005/04/07 12:45:47 gjozsa
Bugfix
Revision 1.33 2005/04/06 05:58:24 gjozsa
Bugfix: init now corrects the noiseweight to 1 in case of mode%2
Revision 1.32 2005/04/04 08:42:19 gjozsa
Left work
Revision 1.31 2005/04/01 12:37:11 gjozsa
Large improvements, repeated calls with same velocity dispersion are much faster
Revision 1.29 2005/03/11 17:45:54 gjozsa
Left work
Revision 1.28 2005/03/04 18:13:53 gjozsa
Left work
Revision 1.27 2005/03/02 17:56:09 gjozsa
Left work
Revision 1.26 2005/01/17 12:13:34 gjozsa
Left work
Revision 1.25 2005/01/06 10:44:10 gjozsa
Left work
Revision 1.24 2005/01/05 15:33:02 gjozsa
Left work
Revision 1.23 2004/12/30 13:36:05 gjozsa
Added probability evaluation and out-of-place fft
Revision 1.22 2004/12/27 12:54:40 gjozsa
Last update before commenting, no more changes allowed
Revision 1.21 2004/12/23 20:20:50 gjozsa
some minor changes, leaves the implementation of arbitrary arrays
Revision 1.18 2004/12/22 17:33:57 gjozsa
Left work
Revision 1.14 2004/12/21 18:42:12 gjozsa
Left work
Revision 1.10 2004/12/21 17:50:21 gjozsa
some changes
Revision 1.7 2004/12/20 14:55:58 gjozsa
Left work
Revision 1.5 2004/12/20 10:44:12 gjozsa
added
Revision 1.4 2004/12/17 14:13:40 gjozsa
First debugged running version
Revision 1.3 2004/12/16 13:19:51 gjozsa
Left work
Revision 1.1 2004/12/11 17:44:51 gjozsa
Added to CVS control
*/
/* ------------------------------------------------------------ */
/* void fftw_execute_dft_r2c(
const fftw_plan p,
double *in, fftw_complex *out);
void fftw_execute_dft_c2r(
const fftw_plan p,
fftw_complex *in, double *out);
check whether FFTW_UNALIGNED is necessary as a planner flag
or use fftw_malloc and fftw_free in this module for the arrays between which the fft takes place.
*/
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* EXTERNAL INCLUDES */
/* ------------------------------------------------------------ */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <fftw3.h>
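/* Reconcile the OpenMP switches below (behaviour as coded): defining
only OPENMPTIR enables both OPENMPTIR and OPENMPFFT and includes
omp.h; defining both leaves only OPENMPFFT (threaded ffts, no
threaded chisquare loops); defining only OPENMPFFT, or neither,
disables both. */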
#ifndef OPENMPTIR
#undef OPENMPFFT
#endif
#ifdef OPENMPFFT
#undef OPENMPTIR
#endif
#ifdef OPENMPTIR
#define OPENMPFFT
#include <omp.h>
#endif
/* #include <gsl/gsl_randist.h> */
/* #include <gsl/gsl_cdf.h> */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* INTERNAL INCLUDES */
/* ------------------------------------------------------------ */
#include <engalmod.h>
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@def _MEMORY_HERE_ON
@brief Controls the use of the memory_here module
If you don't want to use the memory_here facility comment out this
define, otherwise it will be included.
*/
/* ------------------------------------------------------------ */
/* #define _MEMORY_HERE_ON */
/* #include <memory_here.h> */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* (PRIVATE) SYMBOLIC CONSTANTS */
/* ------------------------------------------------------------ */
#define PI_HERE 3.141592653589793115997963468544185161590576171875
#define SQRTPI 1.772453850905516
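/* This is 1/sqrt(2) = sqrt(1/2) */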
#define SQRTOF2 0.70710678118655
/* This is -1024 */
#define HOT_VALUE -1024
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* STRUCTS */
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* (PRIVATE) GLOBAL VARIABLES */
/* ------------------------------------------------------------ */
static float *expofacsfft_;
static float *expofacsfft_noise_;
static float *veloarray_;
static float *veloarray_noise_;
static float sigma_maj_;
static float sigma_min_;
static float sigma_maj_noise_;
static float sigma_min_noise_;
static Cube original_;
static Cube model_;
static Cube noise_;
static Cube expcube_model_;
static Cube expcube_noise_;
static double *chisquare_;
static fftwf_complex *transformed_cube_model_;
static fftwf_complex *transformed_cube_noise_;
static fftwf_plan plan_noise_, plin_noise_;
static fftwf_plan plan_model_, plin_model_;
static int cubesizexhalf_;
static int cubesizeyhalf_;
static int newsize_;
static int dummy_;
static Cube *(*conmodel_)(void);
static Cube *(*connoise_)(void);
static double (*fetchchisquare_)(void);
static float noiseconstant_1_;
static float noiseconstant_2_;
static float modelconstant_1_;
static int realorigsizex_;
static int realorigsizey_;
static int realmodelsizex_;
static int realmodelsizey_;
static float oldsigma_;
#ifdef OPENMPTIR
#include <omp.h>
#endif
static int threads_;
static double *vector_;
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* PRIVATE FUNCTION DECLARATIONS */
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static double fetchchisquare_unflagged(void)
@brief Get the chisquare without taking care of flags
Returns the chisquare without taking care of flags. This function
will be assigned to the pointer of fetchchisquare if no blanked
pixels are found in the cube.
@return double fetchchisquare_unflagged the chisquare
*/
/* ------------------------------------------------------------ */
static double fetchchisquare_unflagged(void);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static double fetchchisquare_flagged(void)
@brief Get the chisquare taking care of flags
Returns the chisquare taking care of flags. This function
will be assigned to the pointer of fetchchisquare if any blanked
pixel is found in the cube.
@return double fetchchisquare_flagged the chisquare
*/
/* ------------------------------------------------------------ */
static double fetchchisquare_flagged(void);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static float fftgaussian (int nx, int ny, int nv, float *expofacs, float *veloarray)
@brief Calculate a gaussian
Returns the value of a gaussian in dependence of expofacs.
gaussian = exp((expofacs[0]*nx*nx+expofacs[1]*nx*ny+expofacs[2]*
ny*ny+expofacs[3]*nv*nv+expofacs[4])).
To be used by convolgaussfft. No modulation with respect to the
sign of the coordinates will be done. For a number in the exponent
less than MINEXPONENT the return value is 0.
@todo Implement the last thing in the description
@param nx (int) Relative pixelposition in x
@param ny (int) Relative pixelposition in y
@param nv (int) Relative pixelposition in v
@param expofacs (float *) Factors in the gaussian, calculated by
expofacsfft and normalised with respect to the sizes of the array
@param veloarray (float *) Precalculated summands for the evaluation in v
@return float fftgaussian The gaussian at the desired position
*/
/* ------------------------------------------------------------ */
static float fftgaussian (int nx, int ny, int nv, float *expofacs, float *veloarray);
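/* A minimal sketch of the evaluation described above (ignoring the
MINEXPONENT cutoff; veloarray caches the precalculated v-summand):
exponent = expofacs[0]*nx*nx+expofacs[1]*nx*ny+expofacs[2]*ny*ny
+expofacs[3]*nv*nv+expofacs[4];
return expf(exponent);
*/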
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static float fftgaussian_array (int nx, int ny, int nv, float *expofacs, float *array, float *veloarray)
@brief Calculate a gaussian using precalculated summands
Returns the value of a gaussian in dependence of expofacs.
gaussian = exp((expofacs[0]*nx*nx+expofacs[1]*nx*ny+expofacs[2]*
ny*ny+expofacs[3]*nv*nv+expofacs[4])).
To be used by convolgaussfft. No modulation with respect to the
sign of the coordinates will be done. For a number in the exponent
less than MINEXPONENT the return value is 0.
@todo Implement the last thing in the description
@param nx (int) Relative pixelposition in x
@param ny (int) Relative pixelposition in y
@param nv (int) Relative pixelposition in v
@param expofacs (float *) Factors in the gaussian, calculated by
expofacsfft and normalised with respect to the sizes of the array
@param array (float *) Precalculated summands for the exponential evaluation
@param veloarray (float *) Precalculated summands for the evaluation in v
@return float fftgaussian_array The gaussian at the desired position
*/
/* ------------------------------------------------------------ */
static float fftgaussian_array (int nx, int ny, int nv, float *expofacs, float *array, float *veloarray);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static float fftgaussian2d (int nx, int ny, float *expofacs)
@brief Calculate a gaussian
Returns the value of a gaussian in dependence of expofacs with
nu_v = 0. gaussian = exp((expofacs[0]*nx*nx+expofacs[1]*nx*ny+
expofacs[2]*ny*ny+expofacs[4])).
To be used by convolgaussfft. No modulation with respect to the
sign of the coordinates will be done. For a number in the exponent
less than MINEXPONENT the return value is 0 (see fftgaussian).
@todo The last item in the description to be implemented
@param nx (int) Relative pixelposition in x
@param ny (int) Relative pixelposition in y
@param expofacs (float *) Factors in the gaussian, calculated by
expofacsfft and normalised with respect to the sizes of the array
@return float fftgaussian2d: The gaussian at the desired position,
no error handling.
*/
/* ------------------------------------------------------------ */
static float fftgaussian2d(int nx, int ny, float *expofacs);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static float fftgaussian2d_array (int nx, int ny, float *expofacs, float *array)
@brief Calculate a gaussian using precalculated summands
Returns the value of a gaussian in dependence of expofacs with
nu_v = 0. gaussian = exp((expofacs[0]*nx*nx+expofacs[1]*nx*ny+
expofacs[2]*ny*ny+expofacs[4])).
To be used by convolgaussfft. No modulation with respect to the
sign of the coordinates will be done. For a number in the exponent
less than MINEXPONENT the return value is 0 (see fftgaussian).
@todo The last item in the description to be implemented
@param nx (int) Relative pixelposition in x
@param ny (int) Relative pixelposition in y
@param expofacs (float *) Factors in the gaussian, calculated by
expofacsfft and normalised with respect to the sizes of the array
@param array (float *) Precalculated summands for the exponential evaluation
@return float fftgaussian2d_array: The gaussian at the desired position,
no error handling.
*/
/* ------------------------------------------------------------ */
static float fftgaussian2d_array(int nx, int ny, float *expofacs, float *array);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static Cube *convolgaussfft_here(void)
@brief Convolve a cube with a gaussian via fft
In-place convolution of a cube Cube with a gaussian via fft. The
convolution is not normalised in the xy-plane but in v. No
convolution takes place in v-direction in case of only one
plane. See function expofacsfft_here for definition of expofacsfft_
array.
Takes no arguments; operates on the module-global cube model_.
@return (success) Cube *convolgaussfft_here: The convolved cube\n
(error) NULL
*/
/* ------------------------------------------------------------ */
static Cube *convolgaussfft_here(void);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static Cube *convolgaussfft_here_single(void)
@brief Convolve a cube with a gaussian via fft
In-place convolution of a cube Cube with a gaussian via fft. The
convolution is not normalised in the xy-plane but in v. No
convolution takes place in v-direction in case of only one
plane. See function expofacsfft_here for definition of expofacsfft_
array.
Takes no arguments; operates on the module-global cube model_.
@return (success) Cube *convolgaussfft_here_single: The convolved cube\n
(error) NULL
*/
/* ------------------------------------------------------------ */
static Cube *convolgaussfft_here_single(void);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static Cube *convolgaussfft_noise(void)
@brief Calculation of a weights map from the cube
cube is convolved with a beam of sqrt(1/2) times the sigma of the
convolving beam and normalized with a factor
2*sqrt(pi)*sigma_v*fluxpoint, where fluxpoint is the flux of one
pointsource in galmod. This is not an in-place convolution, but it
is saved to noise_.points. Then the noise of the original cube
squared is added to noise_.points (more accurately this is done in
Fourier-space before backtransformation.) The resulting map is used
as a weights map for calculation of the chisquare. See function
expofacsfft_noise for definition of expofacsfft_noise_ array.
Takes no arguments; operates on the module-global cubes model_ and noise_.
@return (success) Cube *convolgaussfft_noise: The convolved cube\n
(error) NULL
*/
/* ------------------------------------------------------------ */
static Cube *convolgaussfft_noise(void);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static Cube *convolgaussfft_noise_single(void)
@brief Calculation of a weights map from the cube
cube is convolved with a beam of sqrt(1/2) times the sigma of the
convolving beam and normalized with a factor
2*sqrt(pi)*sigma_v*fluxpoint, where fluxpoint is the flux of one
pointsource in galmod. This is not an in-place convolution, but it
is saved to noise_.points. Then the noise of the original cube
squared is added to noise_.points (more accurately this is done in
Fourier-space before backtransformation.) The resulting map is used
as a weights map for calculation of the chisquare. See function
expofacsfft_noise for definition of expofacsfft_noise_ array.
Takes no arguments; operates on the module-global cubes model_ and noise_.
@return (success) Cube *convolgaussfft_noise_single: The convolved cube\n
(error) NULL
*/
/* ------------------------------------------------------------ */
static Cube *convolgaussfft_noise_single(void);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static void makemodelarray(float *array)
@brief Fill the allocated array *array with precalculated summands for exp evaluation of the model_ cube
@param array (float *) The allocated array to fill
@return void
*/
/* ------------------------------------------------------------ */
static void makemodelarray(float *array);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static void makenoisearray(float *array)
@brief Fill the allocated array *array with precalculated summands for exp evaluation of the noise_ cube
@param array (float *) The allocated array to fill
@return void
*/
/* ------------------------------------------------------------ */
static void makenoisearray(float *array);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static float findpixelrealrel(Cube cube, int x, int y, int v)
@brief Find relative pixel values in a padded Cube
The zero coordinate is array[0]. This function is not safe at all!
@param cube (Cube) The input cube
@param x (int) relative x coordinate
@param y (int) relative y coordinate
@param v (int) relative v coordinate
@return (success) float findpixelrealrel: Pixel value
*/
/* ------------------------------------------------------------ */
static float findpixelrealrel(Cube cube, int x, int y, int v);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static float findpixelrealrelmod(Cube cube, int x, int y, int v)
@brief Find relative pixel values in a padded Cube
The zero coordinate is array[0]. This function is not safe at all!
@param cube (Cube) The input cube
@param x (int) relative x coordinate
@param y (int) relative y coordinate
@param v (int) relative v coordinate
@return (success) float findpixelrealrelmod: Pixel value
*/
/* ------------------------------------------------------------ */
static float findpixelrealrelmod(Cube cube, int x, int y, int v);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static float *expofacsfft_here(float sigma_maj, float sigma_min, float *sincosofangle)
@brief Calculate static factors needed by convolgaussfft
Returns an allocated array containing factors needed by
convolgaussfft to convolve an array with a gaussian with sigma at
the major axis sigma_major, minor axis sigma_minor. These factors
are calculated from the measures of the convolution kernel and won't
change during the whole program. There are, however, members of the
array that will change and will be filled in by calling the
changeexpofacsfft and changeexpofacsfft_noise routines.
@param sigma_maj (float) The sigma in direction of the major axis
@param sigma_min (float) The sigma in direction of the minor axis
@param sincosofangle (float *) An array containing the sin and the cos
of the position angle
@return (success) float *expofacsfft: The factors wanted\n
(error) NULL
*/
/* ------------------------------------------------------------ */
static float *expofacsfft_here(float sigma_maj, float sigma_min, float *sincosofangle);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static Cube *convolgaussfft_here_array(void)
@brief Convolve a cube with a gaussian via fft using a predefined array
In-place convolution of a cube Cube with a gaussian via fft. The
convolution is not normalised in the xy-plane but in v. No
convolution takes place in v-direction in case of only one
plane. See function expofacsfft_here for definition of expofacsfft_
array.
Takes no arguments; operates on the module-global cube model_.
@return (success) Cube *convolgaussfft_here_array: The convolved cube\n
(error) NULL
*/
/* ------------------------------------------------------------ */
static Cube *convolgaussfft_here_array(void);
/* static void convolgaussfft_here_array_help1(void); */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static Cube *convolgaussfft_here_single_array(void)
@brief Convolve a cube with a gaussian via fft using a predefined array
In-place convolution of a cube Cube with a gaussian via fft. The
convolution is not normalised in the xy-plane but in v. No
convolution takes place in v-direction in case of only one
plane. See function expofacsfft_here for definition of expofacsfft_
array.
Takes no arguments; operates on the module-global cube model_.
@return (success) Cube *convolgaussfft_here_single_array: The convolved cube\n
(error) NULL
*/
/* ------------------------------------------------------------ */
static Cube *convolgaussfft_here_single_array(void);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static Cube *convolgaussfft_noise_array(void)
@brief Calculation of a weights map from the cube using a predefined array
cube is convolved with a beam of sqrt(1/2) times the sigma of the
convolving beam and normalized with a factor
2*sqrt(pi)*sigma_v*fluxpoint, where fluxpoint is the flux of one
pointsource in galmod. This is not an in-place convolution, but it
is saved to noise_.points. Then the noise of the original cube
squared is added to noise_.points (more accurately this is done in
Fourier-space before backtransformation.) The resulting map is used
as a weights map for calculation of the chisquare. See function
expofacsfft_noise for definition of expofacsfft_noise_ array.
Takes no arguments; operates on the module-global cubes model_ and noise_.
@return (success) Cube *convolgaussfft_noise_array: The convolved cube\n
(error) NULL
*/
/* ------------------------------------------------------------ */
static Cube *convolgaussfft_noise_array(void);
/* static void convolgaussfft_noise_array_help1(void); */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static Cube *convolgaussfft_noise_single_array(void)
@brief Calculation of a weights map from the cube using a predefined array
cube is convolved with a beam of sqrt(1/2) times the sigma of the
convolving beam and normalized with a factor
2*sqrt(pi)*sigma_v*fluxpoint, where fluxpoint is the flux of one
pointsource in galmod. This is not an in-place convolution, but it
is saved to noise_.points. Then the noise of the original cube
squared is added to noise_.points (more accurately this is done in
Fourier-space before backtransformation.) The resulting map is used
as a weights map for calculation of the chisquare. See function
expofacsfft_noise for definition of expofacsfft_noise_ array.
Takes no arguments; operates on the module-global cubes model_ and noise_.
@return (success) Cube *convolgaussfft_noise_single_array: The convolved cube\n
(error) NULL
*/
/* ------------------------------------------------------------ */
static Cube *convolgaussfft_noise_single_array(void);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static void changeexpofacsfft(float sigma_v)
@brief Calculate factors needed by convolgaussfft
Changes the expofacsfft_ array containing factors needed by
convolgaussfft to convolve an array with a gaussian with sigma at
the major axis sigma_major, minor axis sigma_minor, and v-axis
sigma_v.
@param sigma_v (float) The (original) sigma in v-direction
@return (success) void
*/
/* ------------------------------------------------------------ */
static void changeexpofacsfft(float sigma_v);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static void changeexpofacsfft_noise(float sigma_v)
@brief Calculate factors needed by convolgaussfft_noise
Changes the expofacsfft_noise_ array containing factors needed by
convolgaussfft to convolve an array with a gaussian with sigma at
the major axis sigma_major, minor axis sigma_minor, and v-axis
sigma_v/sqrt(2). Also, a normalisation is applied, such that the
output is scaled by scale*2*sqrt(pi)*sigma_v*fluxpoint.
@param sigma_v (float) The (original) sigma in v-direction
@return (success) void
*/
/* ------------------------------------------------------------ */
static void changeexpofacsfft_noise(float sigma_v);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static float *sincosofangle(float angle)
@brief Returns the sin and the cosine of an angle in an
allocated array
Returns an allocated array containing the cos and the sin of an
angle in degrees. The array has to be freed.
@param angle (float) Angle in degrees
@return (success) float *sincosofangle: An array with the sin and
the cos of the angle\n
(error) NULL
*/
/* ------------------------------------------------------------ */
static float *sincosofangle(float angle);
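/* A minimal sketch under the stated contract (element order assumed;
the caller has to free the result):
static float *sincosofangle (float angle)
{
float *sc;
if (!(sc = (float *) malloc(2*sizeof(float))))
return NULL;
sc[0] = sinf(degreetoradian(angle));
sc[1] = cosf(degreetoradian(angle));
return sc;
}
*/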
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static float degreetoradian(float degrees)
@brief Converts from degrees to radians
Changes a number from deg to rad
@param degrees (float) An angle in degrees
@return float degreetoradian: Input angle in radians
*/
/* ------------------------------------------------------------ */
static float degreetoradian(float degrees);
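/* A minimal sketch: return degrees*((float) PI_HERE)/180.0f; */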
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/**
@fn static int initchisquare(float *arrayorig, float *arraymodel, int
*x, int *y, int *v, float *hpbwmaj, float *hpbwmin, float *pa, float
*scale, float *flux, float *sigma, int *mode, int *arrayvsize,
double *chisquare, float *noiseweight, int *inimode, int *threads)
@brief Initializes the chisquare derivation control, internal function
This function will be called internally by initchisquare_, while as
many parameters as possible are made local copies to this module.
Initialising routine. Reads in the arrays corresponding to the
original cube and the model and links them into global tirific Cubes
that are private to the module. The user gives the logical size of
the cubes *x, *y, *v, where any pixel is addressed by
pixelvalue(x,y,v) = x+physical_size_in_x*(y+physical_size_in_y*v),
where x,y,v are the integer pixelvalues starting with 0. The physical
size of the array in x has to be 2*(logical_size_x/2+1), where the
division is an integer division (rounds down to the next
integer). The physical size of the array in y has to be the same as
the logical, while there is no restriction on the physical size in v
other than that it has to be at least the logical size in v. Hence the
physical size in v is passed in arrayvsize.
hpbwmaj, hpbwmin are the major axis and minor axis HPBW of the
convolving gaussian beam, pa its position angle. These quantities
don't change during several calculations of the chisquare. Scale is
the scale the passed model has to be multiplied by to match the
units of the original (If the model is in W.U., the original in Jy,
this is 0.005). Flux is the flux of one pointsource in galmod. Sigma
is the rms noise in the original.
At initialisation the mode of the chisquare calculation can be
set. Bitwise the following adjustments are made, while the value to be passed with mode is bit0 + 2*bit1 + 4*bit2:
Bit 0: A uniform error that equals the noise in the original is used
to weight the chisquare if unset (bit0 = 0), if set (bit0 = 1), a
weightmap is calculated from the model, that is then used to weight
the chisquare.
Bit 1: If set (bit1 = 1), memory will be allocated for one or two
arrays within which precalculated factors are stored that otherwise
would be recalculated repeatedly at the time of chisquare
calculation. Saves some time. If unset (bit1 = 0) this optimisation
will not take place, saving memory.
Bit 2: If set (bit2 = 1), memory will be allocated for out-of-place
ffts instead of the in-place ffts used for the convolution. fftw can
calculate more efficient ways to perform the fft. This costs a lot
of memory (see list below), such that for computers with small
memory and big cubes, this option should be unset.
initchisquare will check upon the feasibility of the operation and
return 0 if the memory allocations cannot be made. The additionally
required memory (not counting the passed arrays) is a bit more than:
mode = 0: sizeof(float)* [ 0 ]
mode = 1: sizeof(float)* [ ((*x/2)*2+2)**y**v ]
mode = 2: sizeof(float)* [ (*x/2+1)**y ]
mode = 3: sizeof(float)* [ ((*x/2)*2+2)**y**v + 2 * (*x/2+1)**y ]
mode = 4: sizeof(float)* [ ((*x/2)*2+2)**y**v ]
mode = 5: sizeof(float)* [ 3 * ((*x/2)*2+2)**y**v ]
mode = 6: sizeof(float)* [ ((*x/2)*2+2)**y**v + (*x/2+1)**y ]
mode = 7: sizeof(float)* [ 3 * ((*x/2)*2+2)**y**v + 2 * (*x/2+1)**y ]
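For example, a weight map (bit 0), the precalculated arrays (bit 1),
and out-of-place ffts (bit 2) are all requested with mode = 1+2+4 = 7.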
The chisquare evaluation goes as follows (logically, internal
calculation goes a slightly different path):
1) The pointsource model is convolved with a gaussian beam of HPBW
size hpbwmaj, hpbwmin, hpbwv (given when calling the getchisquare
function), The beam is rotated from N to E with the position angle
pa, but not in the third dimension. All values are pixel values,
except for the pa, which is in deg. The result of the convolution is
m.
2) The pointsource model is convolved with a gaussian beam of
1/sqrt(2) times the size of the original convolving beam and the
same position angle and then multiplied with the flux of one
pointsource pointflux resulting in the map r
3) An inverse weightmap w is computed with w(x,y,z) =
((sigma*noiseweight)^2+r(x,y,z))*noiseweight^(-2)
The noiseweight parameter serves two functions. It determines how
much weight is laid on the quantisation noise imposed by the
pointsource quantisation in comparison to the natural noise sigma of
the original datacube. It hence also serves as a downweighting
function of regions with high surface density. Bit 0 of mode set to 0 is
equivalent to noiseweight = infinity, while noiseweight towards 0 (0
is actually an error) will impose an additional weight on regions of
low surface density.
4) The chisquare is calculated from original o, convolved model m,
and inverse noisemap n by chisquare = sum_x_y_v
(o(x,y,v)-m(x,y,v))^2/n(x,y,v)
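In C terms this last step amounts to the following (illustrative
sketch; o, m, n as above, loops over the logical sizes *x, *y, *v,
ignoring padding and flags):
chisquare = 0.0;
for (v = 0; v < size_v; ++v)
for (y = 0; y < size_y; ++y)
for (x = 0; x < size_x; ++x)
chisquare += (o(x,y,v)-m(x,y,v))*(o(x,y,v)-m(x,y,v))/n(x,y,v);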
A value of mode of 2 or 3 means that the module needs a bit more
memory, but will be slightly faster, as some factors for the
evaluation of the chisquare are precalculated and stored in the
memory.
The chisquare parameter passes the pointer to the variable that
contains the chisquare.
As this module is meant for the use in interfacing fortran and c,
all parameters are pointers. It is recommended to use them only for
passing them into the initialisation routine, as some of them are
used internally. Note that when calling the initchisquare_ routine,
the input arrays will be overwritten. They should be initialised
after calling initchisquare_.
The parameter inimode ranges from 0 to 3 and determines the time
that is given to the fft initialising routines to calculate the
shortest way to perform an fft. 0 means that almost no time is spent
on optimising the routines, which then will take longer; 3 means to
spend a long time optimising (once, for the whole process) to really
get the shortest fft, which may pay off if a long time is spent
calculating the chisquare again and again.
@param arrayorig (*float) Array corresponding to the original cube
@param arraymodel (*float) Array corresponding to the model (pointsource) cube
@param x (int *) Size of logical array in x (that is regarded in calculation)
@param y (int *) Size of logical array in y
@param v (int *) Size of logical array in v
@param hpbwmaj (float *) HPBW of the gaussian beam, major axis
@param hpbwmin (float *) HPBW of the gaussian beam, minor axis
@param pa (float *) Position angle of the gaussian beam
@param scale (float *) Scale factor to scale model by to match original
@param flux (float *) The flux of one pointsource in galmod
@param sigma (float *) Sigma rms in the original
@param mode (int *) Calculation of the chisquare depends also on quantisation noise (1 or 3) or on the sigma rms in the original alone (0 or 2). If set to 2 or 3, a bit more memory is needed but the routine runs a bit faster.
@param arrayvsize (int *) Physical size of the reserved arrays of model and original in v; has to be at least the logical size *v
@param chisquare (double *) Pointer to the variable containing the chisquare used throughout the code
@param noiseweight (float *) Parameter used for weighting quantisation noise
@param inimode (int *) Mode for the determination of the best fft.
@param threads (int *) Number of threads.
@return (success) int initchisquare_: 1
(error) 0
*/
/* ------------------------------------------------------------ */
static int initchisquare(float *arrayorig, float *arraymodel, int *x, int *y, int *v, float *hpbwmaj, float *hpbwmin, float *pa, float *scale, float *flux, float *sigma, int *mode, int *arrayvsize, double *chisquare, float *noiseweight, int *inimode, int *threads);
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* FUNCTION CODE */
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Initialisation from external. The purpose of this function is to make the module robust against changes from outside, i.e., the function expects pointers, because that is what you get when you call c from fortran. Internally these should be protected, i.e. local static variables are created that are pointed to */
int initchisquare_(float *arrayorig, float *arraymodel, int *x, int *y, int *v, float *hpbwmaj, float *hpbwmin, float *pa, float *scale, float *flux, float *sigma, int *mode, int *arrayvsize, double *chisquare, float *noiseweight, int *inimode, int *threads)
{
static int xm, ym, vm;
static float hpbwmajm, hpbwminm, pam, scalem, fluxm, sigmam;
static int modem, arrayvsizem;
static float noiseweightm;
static int inimodem;
static int threadsm;
xm = *x;
ym = *y;
vm = *v;
hpbwmajm = *hpbwmaj;
hpbwminm = *hpbwmin;
pam = *pa;
scalem = *scale;
fluxm = *flux;
sigmam = *sigma;
modem = *mode;
arrayvsizem = *arrayvsize;
noiseweightm = *noiseweight;
inimodem = *inimode;
threadsm = *threads;
return initchisquare(arrayorig, arraymodel, &xm, &ym, &vm, &hpbwmajm, &hpbwminm, &pam, &scalem, &fluxm, &sigmam, &modem, &arrayvsizem, chisquare, &noiseweightm, &inimodem, &threadsm);
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Initialisation from external with a C calling convention: parameters are passed by value and copied into local static variables that are then pointed to, keeping the module robust against changes from outside */
int initchisquare_c(float *arrayorig, float *arraymodel, int x, int y, int v, float hpbwmaj, float hpbwmin, float pa, float scale, float flux, float sigma, int mode, int arrayvsize, double *chisquare, float noiseweight, int inimode, int threads)
{
static int xm, ym, vm;
static float hpbwmajm, hpbwminm, pam, scalem, fluxm, sigmam;
static int modem, arrayvsizem;
static float noiseweightm;
static int inimodem;
static int threadsm;
xm = x;
ym = y;
vm = v;
hpbwmajm = hpbwmaj;
hpbwminm = hpbwmin;
pam = pa;
scalem = scale;
fluxm = flux;
sigmam = sigma;
modem = mode;
arrayvsizem = arrayvsize;
noiseweightm = noiseweight;
inimodem = inimode;
threadsm = threads;
return initchisquare(arrayorig, arraymodel, &xm, &ym, &vm, &hpbwmajm, &hpbwminm, &pam, &scalem, &fluxm, &sigmam, &modem, &arrayvsizem, chisquare, &noiseweightm, &inimodem, &threadsm);
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Initialisation */
int initchisquare(float *arrayorig, float *arraymodel, int *x, int *y, int *v, float *hpbwmaj, float *hpbwmin, float *pa, float *scale, float *flux, float *sigma, int *mode, int *arrayvsize, double *chisquare, float *noiseweight, int *inimode, int *threads)
{
float *sincosofangle_;
int physical[3];
int physical2[3];
int logical[3];
int physicaln[3];
int inimodel;
static char usedonce = 0;
/* hyper */
#ifdef OPENMPFFT
fftwf_init_threads();
#endif
if ((usedonce)) {
if ((noise_.points))
fftwf_free(noise_.points);
if ((transformed_cube_noise_))
fftwf_free(transformed_cube_noise_);
if ((transformed_cube_model_))
fftwf_free(transformed_cube_model_);
if ((expcube_model_.points))
fftwf_free(expcube_model_.points);
if ((expcube_noise_.points))
fftwf_free(expcube_noise_.points);
if ((expofacsfft_))
free(expofacsfft_);
if ((expofacsfft_noise_))
free(expofacsfft_noise_);
if ((veloarray_))
fftwf_free(veloarray_);
if ((veloarray_noise_))
fftwf_free(veloarray_noise_);
}
noise_.points = NULL;
transformed_cube_noise_ = NULL;
transformed_cube_model_ = NULL;
expcube_model_.points = NULL;
expcube_noise_.points = NULL;
expofacsfft_ = NULL;
expofacsfft_noise_ = NULL;
veloarray_ = NULL;
veloarray_noise_ = NULL;
oldsigma_ = -1;
threads_ = *threads;
vector_ = (double *) malloc(threads_*sizeof(double));
/* set number of threads */
#ifdef OPENMPTIR
omp_set_num_threads(threads_);
#endif
/* put the chisquare in its place */
chisquare_ = chisquare;
/* Get the array of the original */
original_.points = arrayorig;
/* Get the array of the model */
model_.points = arraymodel;
realorigsizex_ = 2*(*x/2+1);
/* 2*(*x/2+1); */
realorigsizey_ = *y;
realmodelsizex_ = 2*(*x/2+1);
realmodelsizey_ = *y;
/* Allocate memory for the noisecube if the noise per pixel is required in future */
if ((*mode & 1)) {
if (!((noise_.points) = (float *) fftwf_malloc(((*x/2)*2+2)**y**v*sizeof(float))))
goto error;
/* There might be a chance that things work faster with an out-of-place trafo on the expense of double the memory usage */
if (*mode & 4) {
if (!(transformed_cube_noise_ = (fftwf_complex *) fftwf_malloc((*x/2+1)**y**v*sizeof(fftwf_complex)))) {
fftwf_free(noise_.points);
goto error;
}
}
}
else
noise_.points = NULL;
/* There might be a chance that things work faster with an out-of-place trafo on the expense of double the memory usage */
if (*mode & 4) {
if (!(transformed_cube_model_ = (fftwf_complex *) fftwf_malloc((*x/2+1)**y**v*sizeof(fftwf_complex)))) {
if (*mode & 1) {
fftwf_free(noise_.points);
fftwf_free(transformed_cube_noise_);
}
goto error;
}
}
/* Allocate memory for the expcubes if they are required in future */
if ((*mode & 2)) {
if (!((expcube_model_.points) = (float *) fftwf_malloc((*x/2+1)**y*sizeof(float)))) {
if ((*mode & 1))
fftwf_free(noise_.points);
if ((*mode & 4)) {
if ((*mode & 1))
fftwf_free(transformed_cube_noise_);
fftwf_free(transformed_cube_model_);
}
goto error;
}
expcube_model_.size_x = *x/2+1;
expcube_model_.size_y = *y;
expcube_model_.size_v = 1;
expcube_model_.padding = 0;
if ((*mode & 1)) {
if (!((expcube_noise_.points) = (float *) fftwf_malloc((*x/2+1)**y*sizeof(float)))) {
if ((*mode & 1))
fftwf_free(noise_.points);
fftwf_free(expcube_model_.points);
if ((*mode & 4)) {
if ((*mode & 1))
fftwf_free(transformed_cube_noise_);
fftwf_free(transformed_cube_model_);
}
goto error;
}
}
else
expcube_noise_.points = NULL;
/* This info is warranted */
expcube_noise_.size_x = *x/2+1;
expcube_noise_.size_y = *y;
expcube_noise_.size_v = 1;
expcube_noise_.padding = 0;
expcube_noise_.refpix_x = expcube_noise_.refpix_y = expcube_noise_.refpix_v = expcube_model_.refpix_x = expcube_model_.refpix_y = expcube_model_.refpix_v = 0;
}
else
expcube_model_.points = expcube_noise_.points = NULL;
/* Now get the sizes right */
original_.size_x = model_.size_x = noise_.size_x = *x;
original_.size_y = model_.size_y = noise_.size_y = *y;
original_.size_v = model_.size_v = noise_.size_v = *v;
original_.refpix_x = model_.refpix_x = noise_.refpix_x = 0;
original_.refpix_y = model_.refpix_y = noise_.refpix_y = 0;
original_.refpix_v = model_.refpix_v = noise_.refpix_v = 0;
/* We don't need the reference pixel, but the padding */
original_.padding = model_.padding = noise_.padding = (*x/2)*2+2-*x;
/* The scale */
original_.scale = *scale;
model_.scale = *flux;
if (!(*mode & 1))
*noiseweight = 1;
noise_.scale = *sigma**sigma**noiseweight**noiseweight;
expcube_model_.scale = *noiseweight**noiseweight;
/* Now initialize the expofacsfft array */
/* We have only the HPBWs, so calculate the gaussian widths */
if (!(sincosofangle_ = sincosofangle(*pa))) {
if ((*mode & 1)) {
fftwf_free(noise_.points);
if ((*mode & 2))
fftwf_free(expcube_noise_.points);
}
if ((*mode & 2))
free(expcube_model_.points);
if ((*mode & 4)) {
if ((*mode & 1))
fftwf_free(transformed_cube_noise_);
fftwf_free(transformed_cube_model_);
}
goto error;
}
if (!(expofacsfft_ = expofacsfft_here(sigma_maj_ = 0.42466090014401**hpbwmaj, sigma_min_ = 0.42466090014401**hpbwmin, sincosofangle_))) {
if ((*mode & 1)) {
fftwf_free(noise_.points);
if ((*mode & 2))
fftwf_free(expcube_noise_.points);
}
if ((*mode & 2))
fftwf_free(expcube_model_.points);
free(sincosofangle_);
if ((*mode & 4)) {
if ((*mode & 1))
fftwf_free(transformed_cube_noise_);
fftwf_free(transformed_cube_model_);
}
goto error;
}
if (!(expofacsfft_noise_ = expofacsfft_here(sigma_maj_noise_ = sigma_maj_*SQRTOF2, sigma_min_noise_ = sigma_min_*SQRTOF2, sincosofangle_))) {
if ((*mode & 1)) {
fftwf_free(noise_.points);
if ((*mode & 2))
fftwf_free(expcube_noise_.points);
}
if ((*mode & 2))
fftwf_free(expcube_model_.points);
free(sincosofangle_);
free(expofacsfft_);
if ((*mode & 4)) {
if ((*mode & 1))
fftwf_free(transformed_cube_noise_);
fftwf_free(transformed_cube_model_);
}
goto error;
}
/* Now the veloarray */
if (!(veloarray_ = (float *) fftwf_malloc((model_.size_v/2+1)*sizeof(float)))) {
if ((*mode & 1)) {
fftwf_free(noise_.points);
noise_.points = NULL;
if ((*mode & 2)) {
fftwf_free(expcube_noise_.points);
expcube_noise_.points = NULL;
}
}
if ((*mode & 2)) {
fftwf_free(expcube_model_.points);
expcube_model_.points = NULL;
}
free(sincosofangle_);
sincosofangle_ = NULL;
free(expofacsfft_);
expofacsfft_ = NULL;
free(expofacsfft_noise_);
expofacsfft_noise_ = NULL;
if ((*mode & 4)) {
if ((*mode & 1)) {
fftwf_free(transformed_cube_noise_);
transformed_cube_noise_ = NULL;
}
fftwf_free(transformed_cube_model_);
transformed_cube_model_ = NULL;
}
goto error;
}
/* Now the veloarray for the noise */
if (!(veloarray_noise_ = (float *) fftwf_malloc((model_.size_v/2+1)*sizeof(float)))) {
if ((*mode & 1)) {
fftwf_free(noise_.points);
noise_.points = NULL;
if ((*mode & 2)) {
fftwf_free(expcube_noise_.points);
expcube_noise_.points = NULL;
}
}
if ((*mode & 2)) {
fftwf_free(expcube_model_.points);
expcube_model_.points = NULL;
}
free(sincosofangle_);
sincosofangle_ = NULL;
free(expofacsfft_);
expofacsfft_ = NULL;
free(expofacsfft_noise_);
expofacsfft_noise_ = NULL;
if ((*mode & 4)) {
if ((*mode & 1)) {
fftwf_free(transformed_cube_noise_);
transformed_cube_noise_ = NULL;
}
fftwf_free(transformed_cube_model_);
transformed_cube_model_ = NULL;
}
fftwf_free(veloarray_);
goto error;
}
/* Fill the arrays that describe the transformation */
if (model_.size_v != 1) {
logical[0] = model_.size_v;
logical[1] = model_.size_y;
logical[2] = model_.size_x;
physical[0] = *arrayvsize;
physical[1] = model_.size_y;
physical[2] = 2*(model_.size_x/2)+2;
physicaln[0] = model_.size_v;
physicaln[1] = model_.size_y;
physicaln[2] = 2*(model_.size_x/2)+2;
physical2[0] = model_.size_v;
physical2[1] = model_.size_y;
physical2[2] = (model_.size_x/2)+1;
if (*mode & 2) {
connoise_ = convolgaussfft_noise_array;
conmodel_ = convolgaussfft_here_array;
}
else {
connoise_ = convolgaussfft_noise;
conmodel_ = convolgaussfft_here;
}
}
else {
logical[0] = model_.size_y;
logical[1] = model_.size_x;
physical[0] = model_.size_y;
physical[1] = 2*(model_.size_x/2)+2;
physicaln[0] = model_.size_y;
physicaln[1] = 2*(model_.size_x/2)+2;
physical2[0] = model_.size_y;
physical2[1] = (model_.size_x/2)+1;
if (*mode & 2) {
connoise_ = convolgaussfft_noise_single_array;
conmodel_ = convolgaussfft_here_single_array;
}
else {
connoise_ = convolgaussfft_noise_single;
conmodel_ = convolgaussfft_here_single;
}
}
/* Take the input from inimode to decide upon the way to
initialize fftw */
switch (*inimode) {
case 1:
inimodel = FFTW_MEASURE;
break;
case 2:
inimodel = FFTW_PATIENT;
break;
case 3:
inimodel = FFTW_EXHAUSTIVE;
break;
default:
inimodel = FFTW_ESTIMATE;
break;
}
/* Now make the plans for the fftw */
#ifdef OPENMPFFT
fftwf_plan_with_nthreads(threads_);
#endif
if (*mode & 1) {
/* point the transformed cube to the cube itself for an in-place
transformation */
if (*mode & 4)
;
else
transformed_cube_noise_ = (fftwf_complex *) noise_.points;
/* fill plan_noise_ and plin_noise_ with the necessary information. Take care with the order of the axes, reversed for fftw */
if (model_.size_v != 1) {
plan_noise_ = fftwf_plan_many_dft_r2c(3, logical, 1, model_.points, physical, 1, 0, transformed_cube_noise_, physical2, 1, 0, inimodel | FFTW_PRESERVE_INPUT);
plin_noise_ = fftwf_plan_many_dft_c2r(3, logical, 1, transformed_cube_noise_, physical2, 1, 0, noise_.points, physicaln, 1, 0, inimodel);
/* (*x/2)*2+2)**y**v */
/* fftwf_plan_dft_c2r_3d(model_.size_v, model_.size_y, model_.size_x, transformed_cube_noise_, noise_.points, inimodel); */
}
else {
plan_noise_ = fftwf_plan_many_dft_r2c(2,logical , 1, model_.points, physical, 1, 0, transformed_cube_noise_, physical2, 1, 0, inimodel | FFTW_PRESERVE_INPUT);
plin_noise_ = fftwf_plan_dft_c2r_2d(model_.size_y, model_.size_x, transformed_cube_noise_, noise_.points, inimodel);
}
}
/* Fill the global variables that affect the noise estimation and
the convolution */
cubesizexhalf_ = model_.size_x/2;
cubesizeyhalf_ = model_.size_y/2;
newsize_ = cubesizexhalf_+1; /* The physical size of the cube in x */
dummy_ = model_.size_v/2;
/* point the transformed cube to the cube itself for an in-place transformation */
if (*mode & 4)
;
else
transformed_cube_model_ = (fftwf_complex *) (model_).points;
if (model_.size_v != 1) {
logical[0] = model_.size_v;
logical[1] = model_.size_y;
logical[2] = model_.size_x;
physical[0] = *arrayvsize;
/* formerly: model_.size_v */
physical[1] = model_.size_y;
physical[2] = 2*(model_.size_x/2)+2;
physical2[0] = *arrayvsize;
/* formerly: model_.size_v */
physical2[1] = model_.size_y;
physical2[2] = (model_.size_x/2)+1;
}
else {
logical[0] = model_.size_y;
logical[1] = model_.size_x;
physical[0] = model_.size_y;
physical[1] = 2*(model_.size_x/2)+2;
physical2[0] = model_.size_y;
physical2[1] = (model_.size_x/2)+1;
}
/* fill plan and plin with the necessary information. Take care with the order of the axes, reversed for fftw */
if (model_.size_v != 1) {
plan_model_ = fftwf_plan_many_dft_r2c(3, logical, 1, model_.points, physical, 1, 0, transformed_cube_model_, physical2, 1, 0, inimodel);
plin_model_ = fftwf_plan_many_dft_c2r(3, logical, 1, transformed_cube_model_, physical2, 1, 0, model_.points, physical, 1, 0, inimodel);
}
else {
plan_model_ = fftwf_plan_dft_r2c_2d((model_).size_y, (model_).size_x, (model_).points, transformed_cube_model_, inimodel);
plin_model_ = fftwf_plan_dft_c2r_2d((model_).size_y, (model_).size_x, transformed_cube_model_, (model_).points, inimodel);
}
/* Now do some silly hacking */
if (*mode & 1) {
noiseconstant_1_ = (-2*SQRTOF2*PI_HERE*SQRTOF2*PI_HERE)/(original_.size_v*original_.size_v);
noiseconstant_2_ = original_.scale*model_.scale*2*PI_HERE*sigma_min_noise_*sigma_maj_noise_/(original_.size_v*original_.size_y*original_.size_x*2*SQRTPI);
}
modelconstant_1_ = -2*(PI_HERE*PI_HERE)/(original_.size_v*original_.size_v);
/* Fill the arrays for the exponential acceleration if required */
if (*mode & 2) {
/* In any case that is for the model */
makemodelarray(expcube_model_.points);
/* Could be that it is also the noisemap */
if (*mode & 1) {
makenoisearray(expcube_noise_.points);
}
}
/* Now check for the function that is needed to calculate the chisquare */
engalmod_chflgs();
/* nearly finished */
free(sincosofangle_);
return 1;
error:
noise_.points = NULL;
transformed_cube_noise_ = NULL;
transformed_cube_model_ = NULL;
expcube_model_.points = NULL;
expcube_noise_.points = NULL;
expofacsfft_ = NULL;
expofacsfft_noise_ = NULL;
veloarray_ = NULL;
veloarray_noise_ = NULL;
return 0;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* (Re-)Initialisation of the chisquare finding routine */
void engalmod_chflgs(void)
{
int i,j,k;
fetchchisquare_ = &fetchchisquare_unflagged;
for(k = 0; k < original_.size_v; ++k){
for(j = 0; j < original_.size_y; ++j) {
for(i = 0; i < original_.size_x; ++i) {
/* A nan compared with itself is false */
if (findpixelrealrel(original_, i, j, k) != findpixelrealrel(original_, i, j, k)) {
/* if ((double) ((findpixelrealrel(original_, i, j, k))) < HOT_VALUE) { */
fetchchisquare_ = &fetchchisquare_flagged;
break;
}
}
}
}
return;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
static float findpixelrealrel(Cube cube, int x, int y, int v)
{
return (cube.points)[x+realorigsizex_*(y+realorigsizey_*v)];
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
static double fetchchisquare_flagged(void)
{
int i,j,k;
double chisquare = 0;
int nthreadz = 0;
for (i =0 ; i < threads_; ++i)
vector_[i] = 0.0;
/* Now calculate the chisquare */
if ((noise_.points)) {
#ifdef OPENMPTIR
#pragma omp parallel for private(i,j)
#endif
for(k = 0; k < original_.size_v; ++k){
#ifdef OPENMPTIR
if (nthreadz == 0) {
nthreadz = omp_get_num_threads();
}
#else
nthreadz = 1;
#endif
for(j = 0; j < original_.size_y; ++j) {
for(i = 0; i < original_.size_x; ++i) {
#ifdef OPENMPTIR
/* A nan compared with itself is false */
if (findpixelrealrel(original_, i, j, k) == findpixelrealrel(original_, i, j, k)) {
/* if (findpixelrealrel(original_, i, j, k) > HOT_VALUE) { */
vector_[omp_get_thread_num()] += (double) ((findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k))*(findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k))/findpixelrealrelmod(noise_, i, j, k));
}
#else
/* A nan compared with itself is false */
if (findpixelrealrel(original_, i, j, k) == findpixelrealrel(original_, i, j, k)) {
/* if (findpixelrealrel(original_, i, j, k) > HOT_VALUE) { */
vector_[0] += (double) ((findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k))*(findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k))/findpixelrealrelmod(noise_, i, j, k));
}
#endif
}
}
}
for (i = 0; i < nthreadz; ++i)
chisquare += vector_[i]*(double) expcube_model_.scale;
}
else {
#ifdef OPENMPTIR
#pragma omp parallel for private(i,j)
#endif
for(k = 0; k < original_.size_v; ++k){
#ifdef OPENMPTIR
if (nthreadz == 0) {
nthreadz = omp_get_num_threads();
}
#else
nthreadz = 1;
#endif
for(j = 0; j < original_.size_y; ++j) {
for(i = 0; i < original_.size_x; ++i) {
#ifdef OPENMPTIR
/* A nan compared with itself is false */
if (findpixelrealrel(original_, i, j, k) == findpixelrealrel(original_, i, j, k)) {
/* if (findpixelrealrel(original_, i, j, k) > HOT_VALUE) { */
vector_[omp_get_thread_num()] += (double) ((findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k))*(findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k)));
}
#else
/* A nan compared with itself is false */
if (findpixelrealrel(original_, i, j, k) == findpixelrealrel(original_, i, j, k)) {
/* if (findpixelrealrel(original_, i, j, k) > HOT_VALUE) { */
vector_[0] += (double) ((findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k))*(findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k)));
}
#endif
}
}
}
for (i = 0; i < nthreadz; ++i)
chisquare += vector_[i]/noise_.scale;
}
return chisquare;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
static double fetchchisquare_unflagged(void)
{
int i,j,k;
double chisquare = 0;
int nthreadz = 0;
for (i =0 ; i < threads_; ++i)
vector_[i] = 0;
/* Now calculate the chisquare */
if ((noise_.points)) {
#ifdef OPENMPTIR
# pragma omp parallel for private(i,j)
#endif
for(k = 0; k < original_.size_v; ++k){
#ifdef OPENMPTIR
if (nthreadz == 0) {
nthreadz = omp_get_num_threads();
}
#else
nthreadz = 1;
#endif
for(j = 0; j < original_.size_y; ++j) {
for(i = 0; i < original_.size_x; ++i) {
#ifdef OPENMPTIR
vector_[omp_get_thread_num()] += (double) ((findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k))*(findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k))/findpixelrealrelmod(noise_, i, j, k));
#else
vector_[0] += (double) ((findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k))*(findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k))/findpixelrealrelmod(noise_, i, j, k));
#endif
}
}
}
for (i = 0; i < nthreadz; ++i)
chisquare += vector_[i]*(double) expcube_model_.scale;
}
else {
#ifdef OPENMPTIR
# pragma omp parallel for private(i,j)
#endif
for(k = 0; k < original_.size_v; ++k){
#ifdef OPENMPTIR
if (nthreadz == 0)
nthreadz = omp_get_num_threads();
#else
nthreadz = 1;
#endif
for(j = 0; j < original_.size_y; ++j) {
for(i = 0; i < original_.size_x; ++i) {
#ifdef OPENMPTIR
vector_[omp_get_thread_num()] += (double) ((findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k))*(findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k)));
#else
vector_[0] += (double) ((findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k))*(findpixelrealrel(original_, i, j, k)-findpixelrealrelmod(model_, i, j, k)));
#endif
}
}
}
for (i = 0; i < nthreadz; ++i)
chisquare += vector_[i]/noise_.scale;
}
return chisquare;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
static float findpixelrealrelmod(Cube cube, int x, int y, int v)
{
return (cube.points)[x+realmodelsizex_*(y+realmodelsizey_*v)];
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
double getchisquare_(float *sigma_v)
{
/* formerly times 0.42466090014401 (the FWHM-to-sigma factor 1/(2*sqrt(2*ln2))), but now it's the real sigma */
return getchisquare_c(*sigma_v);
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
double getchisquare_c (float sigma_v)
/* If ever the flux of one pointsource changes during one run, activate this */
/* static double getchisquare (float *array, float HPBW_v, float pointflux) */
{
/* Set the chisquare to 0 */
double chisquare = 0;
/* If a weight map should be calculated */
if ((noise_.points)) {
if (sigma_v != oldsigma_) {
changeexpofacsfft_noise(sigma_v);
}
/* If ever the flux of one pointsource changes during one run, activate this */
/* model_.scale = pointflux; */
(*connoise_)();
}
/* In any case we need the convolved cube */
if (sigma_v != oldsigma_) {
changeexpofacsfft(sigma_v);
}
(*conmodel_)();
oldsigma_ = sigma_v;
/* Now calculate the chisquare */
chisquare = (*fetchchisquare_)();
*chisquare_ = chisquare;
return chisquare;
}
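/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Hedged usage sketch, ours and unused: only getchisquare_c() above is
   part of the real interface. It shows what the oldsigma_ cache does and
   does not save: the FFT convolution runs on every call (the model cube
   may have changed in between), but the Gaussian factor tables
   (changeexpofacsfft*) are rebuilt only when sigma_v differs from the
   previous call. */
static void getchisquare_usage_sketch(void)
{
  double c1 = getchisquare_c(5.0f); /* builds factor tables, convolves */
  double c2 = getchisquare_c(5.0f); /* reuses the tables, still convolves */
  double c3 = getchisquare_c(7.5f); /* new sigma: rebuilds the tables */
  (void) c1; (void) c2; (void) c3;
}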
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Convolve a cube with a gaussian via fft */
static Cube *convolgaussfft_here(void)
{
int i, j, k;
float expresult; /* A dummy */
/* Convolution in all dimensions or in xy only */
/* Now do the transform */
fftwf_execute(plan_model_);
/* multiply with the gaussian, first for nu_v = 0 */
#ifdef OPENMPTIR
#pragma omp parallel for
#endif
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < (model_).size_y; ++j) {
/* The exponential will be evaluated from 0, ... , N/2 and -1, ..., -N/2 or -N/2 - 1 */
expresult = fftgaussian2d((i <= cubesizexhalf_) ? i : (i-(model_).size_x), (j <= cubesizeyhalf_) ? j : (j-(model_).size_y), expofacsfft_);
transformed_cube_model_[i+newsize_*j][0] = expresult*transformed_cube_model_[i+newsize_*j][0];
transformed_cube_model_[i+newsize_*j][1] = expresult*transformed_cube_model_[i+newsize_*j][1];
}
}
/* Check for an extra-axis in v, i.e. if the dimension in v is even, we have to calculate one v-plane separately */
if (!((model_).size_v % 2)) {
/* multiply with the gaussian, first for nu_v = N_v/2 */
#ifdef OPENMPTIR
#pragma omp parallel for
#endif
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < (model_).size_y; ++j)
{
/* The exponential will be evaluated from 0, ... , N/2 and -1, ..., -N/2 or -N/2 - 1 */
expresult = fftgaussian((i <= cubesizexhalf_) ? i : (i-(model_).size_x), (j <= cubesizeyhalf_) ? j : (j-(model_).size_y), dummy_, expofacsfft_, veloarray_);
transformed_cube_model_[i+newsize_*(j+(model_).size_y*dummy_)][0] = expresult*transformed_cube_model_[i+newsize_*(j+(model_).size_y*dummy_)][0];
transformed_cube_model_[i+newsize_*(j+(model_).size_y*dummy_)][1] = expresult*transformed_cube_model_[i+newsize_*(j+(model_).size_y*dummy_)][1];
}
}
}
/* Now the rest has to be done: v ranges from 1, ..., (N_v-1)/2, and using the symmetry of the Gaussian we fill the rest */
#ifdef OPENMPTIR
#pragma omp parallel for
#endif
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < (model_).size_y; ++j) {
for (k = 1; k <= ((model_).size_v-1)/2; ++k) {
expresult = fftgaussian((i <= cubesizexhalf_) ? i : (i-(model_).size_x), (j <= cubesizeyhalf_) ? j : (j-(model_).size_y), k, expofacsfft_, veloarray_);
transformed_cube_model_[i+newsize_*(j+(model_).size_y*k)][0] = expresult*transformed_cube_model_[i+newsize_*(j+(model_).size_y*k)][0];
transformed_cube_model_[i+newsize_*(j+(model_).size_y*k)][1] = expresult*transformed_cube_model_[i+newsize_*(j+(model_).size_y*k)][1];
/* Because of the symmetry, f(v) = f(-v), we can save quite a few calculations */
transformed_cube_model_[i+newsize_*(j+(model_).size_y*((model_).size_v-k))][0] = expresult*transformed_cube_model_[i+newsize_*(j+(model_).size_y*((model_).size_v-k))][0];
transformed_cube_model_[i+newsize_*(j+(model_).size_y*((model_).size_v-k))][1] = expresult*transformed_cube_model_[i+newsize_*(j+(model_).size_y*((model_).size_v-k))][1];
}
}
}
/* Now do the backtransformation */
fftwf_execute(plin_model_);
return &model_;
}
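/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Minimal, self-contained sketch of the pattern above, ours and unused
   by engalmod (assumptions: fftw3.h is included and PI_HERE is visible):
   forward r2c FFT, pointwise multiplication by the Gaussian transfer
   function, inverse c2r FFT. FFTW's transform pair is unnormalised, so
   the 1/(nx*ny) factor below is mandatory; engalmod folds the analogous
   1/(N_x*N_y*N_v) into expofacs[4] instead. */
static void fft_convolve_sketch(float *img, int nx, int ny, float sigma)
{
  int i, j;
  int nxc = nx/2 + 1; /* r2c stores only the non-negative x frequencies */
  fftwf_complex *spec = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*ny*nxc);
  fftwf_plan fwd = fftwf_plan_dft_r2c_2d(ny, nx, img, spec, FFTW_ESTIMATE);
  fftwf_plan bwd = fftwf_plan_dft_c2r_2d(ny, nx, spec, img, FFTW_ESTIMATE);
  fftwf_execute(fwd);
  for (j = 0; j < ny; ++j) {
    int ky = (j <= ny/2) ? j : (j-ny); /* map to signed frequency, as above */
    for (i = 0; i < nxc; ++i) {
      float fac = expf(-2.0f*PI_HERE*PI_HERE*sigma*sigma*
                       ((float) (i*i)/((float) nx*nx) + (float) (ky*ky)/((float) ny*ny)))
                  /((float) nx*ny);
      spec[i+nxc*j][0] *= fac;
      spec[i+nxc*j][1] *= fac;
    }
  }
  fftwf_execute(bwd); /* img now holds the convolved image */
  fftwf_destroy_plan(fwd);
  fftwf_destroy_plan(bwd);
  fftwf_free(spec);
}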
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Convolve a cube with a gaussian via fft */
static Cube *convolgaussfft_here_single(void)
{
int i, j;
float expresult; /* A dummy */
/* Now do the transform */
fftwf_execute(plan_model_);
/* multiply with the gaussian, first axis y, second x */
#ifdef OPENMPTIR
#pragma omp parallel for
#endif
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < (model_).size_y; ++j) {
expresult = fftgaussian2d((i <= cubesizexhalf_) ? i : (i-(model_).size_x), (j <= cubesizeyhalf_) ? j : (j-(model_).size_y), expofacsfft_);
transformed_cube_model_[i+newsize_*j][0] = expresult*transformed_cube_model_[i+newsize_*j][0];
transformed_cube_model_[i+newsize_*j][1] = expresult*transformed_cube_model_[i+newsize_*j][1];
}
}
/* Now do the backtransformation */
fftwf_execute(plin_model_);
return &model_;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Convolve the input cube with a gaussian via fft to the weightmap, adding a constant offset */
static Cube *convolgaussfft_noise(void)
{
int i, j, k;
float expresult; /* A dummy */
/* Now do the transform */
fftwf_execute(plan_noise_);
/* fftwf_execute(plin_noise_); */
/* return NULL; */
/* multiply with the gaussian, first for nu_v = 0 */
#ifdef OPENMPTIR
#pragma omp parallel for
#endif
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < model_.size_y; ++j) {
/* The exponential will be evaluated from 0, ... , N/2 and -1, ..., -N/2 or -N/2 - 1 */
expresult = fftgaussian2d((i <= cubesizexhalf_) ? i : (i-model_.size_x), (j <= cubesizeyhalf_) ? j : (j-model_.size_y), expofacsfft_noise_);
transformed_cube_noise_[i+newsize_*j][0] = expresult*transformed_cube_noise_[i+newsize_*j][0];
transformed_cube_noise_[i+newsize_*j][1] = expresult*transformed_cube_noise_[i+newsize_*j][1];
}
}
/* Check for an extra-axis in v, i.e. if the dimension in v is even, we have to calculate one v-plane separately */
if (!(model_.size_v % 2)) {
/* multiply with the gaussian, first for nu_v = N_v/2 */
#ifdef OPENMPTIR
#pragma omp parallel for
#endif
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < model_.size_y; ++j)
{
/* The exponential will be evaluated from 0, ... , N/2 and -1, ..., -N/2 or -N/2 - 1 */
expresult = fftgaussian((i <= cubesizexhalf_) ? i : (i-model_.size_x), (j <= cubesizeyhalf_) ? j : (j-model_.size_y), dummy_, expofacsfft_noise_, veloarray_noise_);
transformed_cube_noise_[i+newsize_*(j+model_.size_y*dummy_)][0] = expresult*transformed_cube_noise_[i+newsize_*(j+model_.size_y*dummy_)][0];
transformed_cube_noise_[i+newsize_*(j+model_.size_y*dummy_)][1] = expresult*transformed_cube_noise_[i+newsize_*(j+model_.size_y*dummy_)][1];
}
}
}
/* Now the rest has to be done: v ranges from 1, ..., (N_v-1)/2, and using the symmetry of the Gaussian we fill the rest */
#ifdef OPENMPTIR
#pragma omp parallel for
#endif
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < model_.size_y; ++j) {
for (k = 1; k <= (model_.size_v-1)/2; ++k) {
expresult = fftgaussian((i <= cubesizexhalf_) ? i : (i-model_.size_x), (j <= cubesizeyhalf_) ? j : (j-model_.size_y), k, expofacsfft_noise_, veloarray_noise_);
transformed_cube_noise_[i+newsize_*(j+model_.size_y*k)][0] = expresult*transformed_cube_noise_[i+newsize_*(j+model_.size_y*k)][0];
transformed_cube_noise_[i+newsize_*(j+model_.size_y*k)][1] = expresult*transformed_cube_noise_[i+newsize_*(j+model_.size_y*k)][1];
/* Because of the symmetry, f(v) = f(-v), we can save quite a few calculations */
transformed_cube_noise_[i+newsize_*(j+model_.size_y*(model_.size_v-k))][0] = expresult*transformed_cube_noise_[i+newsize_*(j+model_.size_y*(model_.size_v-k))][0];
transformed_cube_noise_[i+newsize_*(j+model_.size_y*(model_.size_v-k))][1] = expresult*transformed_cube_noise_[i+newsize_*(j+model_.size_y*(model_.size_v-k))][1];
}
}
}
/* Now add the constant square of the noise */
transformed_cube_noise_[0][0] = transformed_cube_noise_[0][0] + noise_.scale;
/* Now do the backtransformation */
fftwf_execute(plin_noise_);
return &noise_;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Convolve the input cube with a gaussian via fft to the weightmap, adding a constant offset */
static Cube *convolgaussfft_noise_single(void)
{
int i, j;
float expresult; /* A dummy */
/* Now do the transform */
fftwf_execute(plan_noise_);
/* multiply with the gaussian, first axis y, second x */
#ifdef OPENMPTIR
#pragma omp parallel for
#endif
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < model_.size_y; ++j) {
expresult = fftgaussian2d((i <= cubesizexhalf_) ? i : (i-model_.size_x), (j <= cubesizeyhalf_) ? j : (j-model_.size_y), expofacsfft_noise_);
transformed_cube_noise_[i+newsize_*j][0] = expresult*transformed_cube_noise_[i+newsize_*j][0];
transformed_cube_noise_[i+newsize_*j][1] = expresult*transformed_cube_noise_[i+newsize_*j][1];
}
}
/* Now add the constant square of the noise */
transformed_cube_noise_[0][0] = transformed_cube_noise_[0][0] + noise_.scale;
/* Now do the backtransformation */
fftwf_execute(plin_noise_);
return &noise_;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Calculate factors needed by convolgaussfft */
static float *expofacsfft_here(float sigma_maj, float sigma_min, float *sincosofangle)
{
float *expofacs;
if ((sincosofangle && (expofacs = (float *) malloc(5*sizeof(float))))) {
/* First content is the factor to put before (n_x/N_x)^2 */
expofacs[0] = -2*PI_HERE*PI_HERE*(sigma_min*sigma_min*sincosofangle[1]*sincosofangle[1]+sigma_maj*sigma_maj*sincosofangle[0]*sincosofangle[0])/(original_.size_x*original_.size_x);
/* Second content is the factor to put before (n_x/N_x)(n_y/N_y) */
expofacs[1] = -4*PI_HERE*PI_HERE*sincosofangle[0]*sincosofangle[1]*(sigma_min*sigma_min-sigma_maj*sigma_maj)/(original_.size_x*original_.size_y);
/* Third content is the factor to put before (n_y/N_y)^2 */
expofacs[2] = -2*PI_HERE*PI_HERE*(sigma_min*sigma_min*sincosofangle[0]*sincosofangle[0]+sigma_maj*sigma_maj*sincosofangle[1]*sincosofangle[1])/(original_.size_y*original_.size_y);
/* Fifth content is the normalisation factor due to the width of the Gaussians. This is not a factor to put in the exponent, though. Here we have to take care whether convolution in only one direction is desired */
if (sigma_maj == 0)
sigma_maj = 1.0/sqrtf(2*PI_HERE);
if (sigma_min == 0)
sigma_min = 1.0/sqrtf(2*PI_HERE);
expofacs[4] = original_.scale*2*PI_HERE*sigma_min*sigma_maj/(original_.size_v*original_.size_y*original_.size_x);
}
else
expofacs = NULL;
return expofacs;
}
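/* Derivation note, ours (hedged; exact axis/angle conventions follow
   sincosofangle() further below, which stores sin at index 0 and cos at
   index 1): a 2-D elliptical Gaussian with dispersions sigma_maj and
   sigma_min, rotated by an angle theta, has the continuous Fourier
   transform
     G(kx,ky) = 2*pi*sigma_maj*sigma_min
                * exp(-2*pi^2*(sigma_min^2*k1^2 + sigma_maj^2*k2^2)),
   where (k1,k2) is (kx,ky) rotated by theta. Substituting kx = n_x/N_x
   and ky = n_y/N_y and expanding the rotation yields expofacs[0..2] as
   the coefficients of (n_x/N_x)^2, (n_x/N_x)(n_y/N_y) and (n_y/N_y)^2,
   while expofacs[4] collects the 2*pi*sigma_min*sigma_maj amplitude, the
   input scale, and the 1/(N_x*N_y*N_v) normalisation of FFTW's
   unnormalised transform pair. */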
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Calculate factors needed by convolgaussfft */
static void changeexpofacsfft_noise(float sigma_v)
{
int i;
/* Fourth content is the factor to put before (n_v/N_v)^2 */
expofacsfft_noise_[3] = sigma_v*sigma_v*noiseconstant_1_;
if ((sigma_v)) {
expofacsfft_noise_[4] = noiseconstant_2_/sigma_v;
/* Now fill the veloarray */
#ifdef OPENMPTIR
#pragma omp parallel for
#endif
for (i = 0; i < model_.size_v/2+1; ++i) {
veloarray_noise_[i] = expf(expofacsfft_noise_[3]*i*i)*expofacsfft_noise_[4];
}
}
else {
expofacsfft_noise_[4] = 2*SQRTPI*noiseconstant_2_;
/* Now fill the veloarray */
#ifdef OPENMPTIR
#pragma omp parallel for
#endif
for (i = 0; i < model_.size_v/2+1; ++i) {
veloarray_noise_[i] = expf(expofacsfft_noise_[3]*i*i)*expofacsfft_noise_[4];
}
}
return;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Calculate factors needed by convolgaussfft */
static void changeexpofacsfft(float sigma_v)
{
int i;
/* Fourth content is the factor to put before (n_v/N_v)^2 */
expofacsfft_[3] = modelconstant_1_*sigma_v*sigma_v;
/* Now fill the veloarray */
#ifdef OPENMPTIR
#pragma omp parallel for
#endif
for (i = 0; i < model_.size_v/2+1; ++i) {
veloarray_[i] = expf(expofacsfft_[3]*i*i)*expofacsfft_[4];
}
return;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Calculate a gaussian */
static float fftgaussian (int nx, int ny, int nv, float *expofacs, float *veloarray)
{
/* As the attempt to save some time (as seen below) failed for some reason, we postpone it */
return expf(expofacs[0]*nx*nx+expofacs[1]*nx*ny+expofacs[2]*ny*ny)*veloarray[nv];
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Calculate a gaussian */
static float fftgaussian_array (int nx, int ny, int nv, float *expofacs, float *array, float *veloarray)
{
/* As the attempt to save some time (as seen below) failed for some reason, we postpone it */
/* return array[nx+expcube_noise_.size_x*ny]*expf(expofacs[3]*nv*nv) * expofacs[4]; */
return array[nx+expcube_noise_.size_x*ny]*veloarray[nv];
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Calculate a gaussian */
static float fftgaussian2d (int nx, int ny, float *expofacs)
{
/* double number; */
/* If the floating point is low enough, we can simply return 0 */
/* if ((number = expf(expofacs[0]*nx*nx+expofacs[1]*nx*ny+expofacs[2]*ny*ny+expofacs[4])) <= MINEXPONENT) */
/* return (float) exp(number); */
/* else */
return expf(expofacs[0]*nx*nx+expofacs[1]*nx*ny+expofacs[2]*ny*ny)*expofacs[4];
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Calculate a gaussian */
static float fftgaussian2d_array (int nx, int ny, float *expofacs, float *array)
{
return array[nx+expcube_noise_.size_x*ny]*expofacs[4];
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Returns the sin and the cosine of an angle in an allocated array */
float *sincosofangle(float angle)
{
float *cossinofangle;
if ((cossinofangle = (float *) malloc(2*sizeof(float)))) {
*cossinofangle = sinf(degreetoradian(angle));
*(cossinofangle+1) = cosf(degreetoradian(angle));
}
return cossinofangle;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Translate from degree to radians */
static float degreetoradian(float degrees)
{
return PI_HERE*degrees/180.0;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Interface to get a cube out of engalmod */
Cube *getoriginal_galmod_(void)
{
return &original_;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Interface to get a cube out of engalmod */
Cube *getmodel_galmod_(void)
{
return &model_;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Interface to get a cube out of engalmod */
Cube *getnoise_galmod_(void)
{
return &noise_;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
static void makemodelarray(float *array)
{
int i,j;
int nx, ny;
#ifdef OPENMPTIR
#pragma omp parallel for
#endif
for (i = 0; i < expcube_model_.size_x; ++i) {
for (j = 0; j < expcube_model_.size_y; ++j) {
nx = (i <= cubesizexhalf_) ? i : (i-(model_).size_x);
ny = (j <= cubesizeyhalf_) ? j : (j-(model_).size_y);
expcube_model_.points[i+expcube_model_.size_x*j] = expofacsfft_[0]*nx*nx+expofacsfft_[1]*nx*ny+expofacsfft_[2]*ny*ny;
}
}
for (i = 0; i < expcube_model_.size_x; ++i) {
for (j = 0; j < expcube_model_.size_y; ++j) {
expcube_model_.points[i+expcube_model_.size_x*j] = expf(array[i+expcube_model_.size_x*j]);
}
}
return;
}
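/* Note, ours: makemodelarray() is a two-pass tabulation. The first,
   parallel pass writes the quadratic exponent for every (i,j) into
   expcube_model_.points; the second, serial pass exponentiates in place
   (it reads through the array argument, which the caller apparently
   passes as expcube_model_.points itself). Afterwards the *_array
   convolution variants replace the per-pixel expf() call with a single
   table lookup and multiply, see fftgaussian_array()/fftgaussian2d_array(). */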
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Convolve a cube with a gaussian via fft */
static Cube *convolgaussfft_here_array(void)
{
int i, j, k;
float expresult; /* A dummy */
/* Convolution in all dimensions or in xy only */
/* Now do the transform */
fftwf_execute(plan_model_);
/* multiply with the gaussian, first for nu_v = 0 */
/* #ifdef OPENMPTIR */
/* #pragma omp parallel for */
/* #endif */
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < (model_).size_y; ++j) {
/* The exponential will be evaluated from 0, ... , N/2 and -1, ..., -N/2 or -N/2 - 1 */
expresult = fftgaussian2d_array(i, j, expofacsfft_, expcube_model_.points);
transformed_cube_model_[i+newsize_*j][0] = expresult*transformed_cube_model_[i+newsize_*j][0];
transformed_cube_model_[i+newsize_*j][1] = expresult*transformed_cube_model_[i+newsize_*j][1];
}
}
/* convolgaussfft_here_array_help1(); */
/* Check for an extra-axis in v, i.e. if the dimension in v is even, we have to calculate one v-plane separately */
if (!((model_).size_v % 2)) {
/* multiply with the gaussian, first for nu_v = N_v/2 */
#ifdef OPENMPTIR
/* pragma omp parallel for */
#endif
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < (model_).size_y; ++j) {
/* The exponential will be evaluated from 0, ... , N/2 and -1, ..., -N/2 or -N/2 - 1 */
expresult = fftgaussian_array(i, j, dummy_, expofacsfft_, expcube_model_.points, veloarray_);
transformed_cube_model_[i+newsize_*(j+(model_).size_y*dummy_)][0] = expresult*transformed_cube_model_[i+newsize_*(j+(model_).size_y*dummy_)][0];
transformed_cube_model_[i+newsize_*(j+(model_).size_y*dummy_)][1] = expresult*transformed_cube_model_[i+newsize_*(j+(model_).size_y*dummy_)][1];
}
}
}
/* Now the rest has to be done: v ranges from 1, ..., (N_v-1)/2, and using the symmetry of the Gaussian we fill the rest */
/* #ifdef OPENMPTIR */
/* !!! pragma omp parallel for */
/* #endif */
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < (model_).size_y; ++j) {
for (k = 1; k <= ((model_).size_v-1)/2; ++k) {
expresult = fftgaussian_array(i, j, k, expofacsfft_, expcube_model_.points, veloarray_);
transformed_cube_model_[i+newsize_*(j+(model_).size_y*k)][0] = expresult*transformed_cube_model_[i+newsize_*(j+(model_).size_y*k)][0];
transformed_cube_model_[i+newsize_*(j+(model_).size_y*k)][1] = expresult*transformed_cube_model_[i+newsize_*(j+(model_).size_y*k)][1];
transformed_cube_model_[i+newsize_*(j+(model_).size_y*((model_).size_v-k))][0] = expresult*transformed_cube_model_[i+newsize_*(j+(model_).size_y*((model_).size_v-k))][0];
transformed_cube_model_[i+newsize_*(j+(model_).size_y*((model_).size_v-k))][1] = expresult*transformed_cube_model_[i+newsize_*(j+(model_).size_y*((model_).size_v-k))][1];
}
}
}
/* Now do the backtransformation */
fftwf_execute(plin_model_);
return &model_;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Convolve a cube with a gaussian via fft */
static Cube *convolgaussfft_here_single_array(void)
{
int i, j;
float expresult; /* A dummy */
/* Now do the transform */
fftwf_execute(plan_model_);
/* multiply with the gaussian, first axis y, second x */
/* #ifdef OPENMPTIR */
/* !!! pragma omp parallel for */
/* #endif */
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < (model_).size_y; ++j) {
expresult = fftgaussian2d_array(i, j, expofacsfft_, expcube_model_.points);
transformed_cube_model_[i+newsize_*j][0] = expresult*transformed_cube_model_[i+newsize_*j][0];
transformed_cube_model_[i+newsize_*j][1] = expresult*transformed_cube_model_[i+newsize_*j][1];
}
}
/* Now do the backtransformation */
fftwf_execute(plin_model_);
return &model_;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Convolve the input cube with a gaussian via fft to the weightmap, adding a constant offset */
static Cube *convolgaussfft_noise_array(void)
{
int i, j, k;
float expresult; /* A dummy */
/* Now do the transform */
fftwf_execute(plan_noise_);
/* fftwf_execute(plin_noise_); */
/* return NULL; */
/* multiply with the gaussian, first for nu_v = 0 */
#ifdef OPENMPTIR
/* pragma omp parallel for */
#endif
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < model_.size_y; ++j) {
/* The exponential will be evaluated from 0, ... , N/2 and -1, ..., -N/2 or -N/2 - 1 */
expresult = fftgaussian2d_array(i, j, expofacsfft_noise_, expcube_noise_.points);
transformed_cube_noise_[i+newsize_*j][0] = expresult*transformed_cube_noise_[i+newsize_*j][0];
transformed_cube_noise_[i+newsize_*j][1] = expresult*transformed_cube_noise_[i+newsize_*j][1];
}
}
if (!(model_.size_v % 2)) {
/* multiply with the gaussian, first for nu_v = N_v/2 */
#ifdef OPENMPTIR
/* pragma omp parallel for */
#endif
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < model_.size_y; ++j)
{
/* The exponential will be evaluated from 0, ... , N/2 and -1, ..., -N/2 or -N/2 - 1 */
expresult = fftgaussian_array(i,j, dummy_, expofacsfft_noise_, expcube_noise_.points, veloarray_noise_);
transformed_cube_noise_[i+newsize_*(j+model_.size_y*dummy_)][0] = expresult*transformed_cube_noise_[i+newsize_*(j+model_.size_y*dummy_)][0];
transformed_cube_noise_[i+newsize_*(j+model_.size_y*dummy_)][1] = expresult*transformed_cube_noise_[i+newsize_*(j+model_.size_y*dummy_)][1];
}
}
}
/* Now the rest has to be done: v ranges from 1, ..., (N_v-1)/2, and using the symmetry of the Gaussian we fill the rest */
/* #ifdef OPENMPTIR */
/* !!! pragma omp parallel for */
/* #endif */
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < model_.size_y; ++j) {
for (k = 1; k <= (model_.size_v-1)/2; ++k) {
expresult = fftgaussian_array(i,j, k, expofacsfft_noise_,expcube_noise_.points, veloarray_noise_);
transformed_cube_noise_[i+newsize_*(j+model_.size_y*k)][0] = expresult*transformed_cube_noise_[i+newsize_*(j+model_.size_y*k)][0];
transformed_cube_noise_[i+newsize_*(j+model_.size_y*k)][1] = expresult*transformed_cube_noise_[i+newsize_*(j+model_.size_y*k)][1];
/* Because of the symmetry, f(v) = f(-v), we can save quite a few calculations */
transformed_cube_noise_[i+newsize_*(j+model_.size_y*(model_.size_v-k))][0] = expresult*transformed_cube_noise_[i+newsize_*(j+model_.size_y*(model_.size_v-k))][0];
transformed_cube_noise_[i+newsize_*(j+model_.size_y*(model_.size_v-k))][1] = expresult*transformed_cube_noise_[i+newsize_*(j+model_.size_y*(model_.size_v-k))][1];
}
}
}
/* Now add the constant square of the noise */
transformed_cube_noise_[0][0] = transformed_cube_noise_[0][0] + noise_.scale;
/* Now do the backtransformation */
fftwf_execute(plin_noise_);
return &noise_;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Convolve the input cube with a gaussian via fft to the weightmap, adding a constant offset */
static Cube *convolgaussfft_noise_single_array(void)
{
int i, j;
float expresult; /* A dummy */
/* Now do the transform */
fftwf_execute(plan_noise_);
/* multiply with the gaussian, first axis y, second x */
#ifdef OPENMPTIR
/* pragma omp parallel for */
#endif
for (i = 0; i < newsize_; ++i) {
for (j = 0; j < model_.size_y; ++j) {
expresult = fftgaussian2d_array(i,j, expofacsfft_noise_, expcube_noise_.points);
transformed_cube_noise_[i+newsize_*j][0] = expresult*transformed_cube_noise_[i+newsize_*j][0];
transformed_cube_noise_[i+newsize_*j][1] = expresult*transformed_cube_noise_[i+newsize_*j][1];
}
}
/* Now add the constant square of the noise */
transformed_cube_noise_[0][0] = transformed_cube_noise_[0][0] + noise_.scale;
/* Now do the backtransformation */
fftwf_execute(plin_noise_);
return &noise_;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
static void makenoisearray(float *array)
{
int i,j;
int nx, ny;
#ifdef OPENMPTIR
#pragma omp parallel for
#endif
for (i = 0; i < expcube_noise_.size_x; ++i) {
for (j = 0; j < expcube_noise_.size_y; ++j) {
nx = (i <= cubesizexhalf_) ? i : (i-(noise_).size_x);
ny = (j <= cubesizeyhalf_) ? j : (j-(noise_).size_y);
expcube_noise_.points[i+expcube_noise_.size_x*j] = expofacsfft_noise_[0]*nx*nx+expofacsfft_noise_[1]*nx*ny+expofacsfft_noise_[2]*ny*ny;
}
}
/* This may not be elegant, but it should be safe */
/* #ifdef OPENMPTIR */
/* !!! pragma omp parallel for */
/* #endif */
for (i = 0; i < expcube_noise_.size_x; ++i) {
for (j = 0; j < expcube_noise_.size_y; ++j) {
expcube_noise_.points[i+expcube_noise_.size_x*j] = expf(array[i+expcube_noise_.size_x*j]);
}
}
return;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Synonym of fftwf_malloc (allocates memory aligned for FFTW's SIMD code paths) */
void *malloc_engalmod(size_t size)
{
return fftwf_malloc(size);
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Synonym of fftwf_free */
void free_engalmod(void *array)
{
fftwf_free(array);
return;
}
/* ------------------------------------------------------------ */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
$Log: engalmod.c,v $
Revision 1.44 2011/05/25 22:25:26 jozsa
Left work
Revision 1.43 2011/05/11 13:37:12 jozsa
Left work
Revision 1.42 2011/05/10 00:30:15 jozsa
Left work
Revision 1.41 2009/05/26 07:56:40 jozsa
Left work
Revision 1.40 2007/08/22 15:58:40 gjozsa
Left work
Revision 1.39 2006/04/07 11:13:32 gjozsa
simple BUGFIX
Revision 1.38 2006/04/06 10:39:09 gjozsa
Included function engalmod_chflgs
Revision 1.37 2006/04/03 11:47:46 gjozsa
included masking, fixed a mask to be present if pixval < -1024
Revision 1.36 2005/04/20 13:26:24 gjozsa
Left work
Revision 1.35 2005/04/12 15:52:14 gjozsa
Left work
Revision 1.34 2005/04/07 12:45:47 gjozsa
Bugfix
Revision 1.33 2005/04/06 05:58:24 gjozsa
Bugfix: init now corrects the noiseweight to 1 in case of mode%2
Revision 1.32 2005/04/04 08:42:19 gjozsa
Left work
Revision 1.31 2005/04/01 12:37:11 gjozsa
Large improvements, repeated calls with same velocity dispersion are much faster
Revision 1.29 2005/03/11 17:45:54 gjozsa
Left work
Revision 1.28 2005/03/04 18:13:53 gjozsa
Left work
Revision 1.27 2005/03/02 17:56:09 gjozsa
Left work
Revision 1.26 2005/01/17 12:13:34 gjozsa
Left work
Revision 1.25 2005/01/06 10:44:10 gjozsa
Left work
Revision 1.24 2005/01/05 15:33:02 gjozsa
Left work
Revision 1.23 2004/12/30 13:36:05 gjozsa
Added probability evaluation and out-of-place fft
Revision 1.22 2004/12/27 12:54:40 gjozsa
Last update before commenting, no further changes allowed
Revision 1.21 2004/12/23 20:20:50 gjozsa
some minor changes, leaves the implementation of arbitrary arrays
Revision 1.18 2004/12/22 17:33:57 gjozsa
Left work
Revision 1.14 2004/12/21 18:42:12 gjozsa
Left work
Revision 1.10 2004/12/21 17:50:21 gjozsa
some changes
Revision 1.7 2004/12/20 14:55:58 gjozsa
Left work
Revision 1.5 2004/12/20 10:44:12 gjozsa
added
Revision 1.4 2004/12/17 14:13:40 gjozsa
First debugged running version
Revision 1.3 2004/12/16 13:19:51 gjozsa
Left work
Revision 1.1 2004/12/11 17:44:51 gjozsa
Added to CVS control
------------------------------------------------------------ */
|
GB_binop__minus_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__minus_uint32
// A.*B function (eWiseMult): GB_AemultB__minus_uint32
// A*D function (colscale): GB_AxD__minus_uint32
// D*A function (rowscale): GB_DxB__minus_uint32
// C+=B function (dense accum): GB_Cdense_accumB__minus_uint32
// C+=b function (dense accum): GB_Cdense_accumb__minus_uint32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__minus_uint32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__minus_uint32
// C=scalar+B GB_bind1st__minus_uint32
// C=scalar+B' GB_bind1st_tran__minus_uint32
// C=A+scalar GB_bind2nd__minus_uint32
// C=A'+scalar GB_bind2nd_tran__minus_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x - y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_UINT32 || GxB_NO_MINUS_UINT32)
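// Usage note (ours): every kernel below compiles to the same pattern,
//
//      #if GB_DISABLE
//      return (GrB_NO_VALUE) ;     // caller falls back to the generic kernel
//      #else
//      ... hard-coded uint32_t loops ...
//      return (GrB_SUCCESS) ;
//      #endif
//
// so defining any of the three GxB_NO_* symbols removes this operator's
// specialized code from the build, and the generic (typecasting) path is
// used instead, without changing results.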
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__minus_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__minus_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__minus_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__minus_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__minus_uint32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__minus_uint32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__minus_uint32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__minus_uint32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__minus_uint32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = Bx [p] ;
Cx [p] = (x - bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
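//------------------------------------------------------------------------------
// Illustrative sketch (ours, not part of the generated kernel set): bind1st
// fixes the first operand x and maps every stored entry bij of B to x - bij;
// the GBB (Bb, p) test above merely skips holes in bitmap matrices. On a
// dense array the same semantics reduce to:
//
//      uint32_t x = 10, B [4] = {1, 2, 3, 4}, C [4] ;
//      for (int p = 0 ; p < 4 ; p++) C [p] = x - B [p] ;   // C = {9,8,7,6}
//
// Note that uint32_t subtraction wraps modulo 2^32 whenever bij > x.
//------------------------------------------------------------------------------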
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__minus_uint32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = Ax [p] ;
Cx [p] = (aij - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x - aij) ; \
}
GrB_Info GB_bind1st_tran__minus_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij - y) ; \
}
GrB_Info GB_bind2nd_tran__minus_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
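// Usage note (ours): the returned reference aliases Cache.Nullability, so
// holding it across a lookup of a different FileID is unsafe: that lookup
// flushes and refills the cache, and the stale reference would then read
// or write the other file's record.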
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This represents the stack of attributes that were pushed by
/// \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
SmallVector<PragmaAttributeEntry, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
  /// The set of file-scoped decls seen so far that have not been used
  /// and about which we must warn if they remain unused. Only contains the
  /// first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedExceptionSpecChecks;
/// All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
  /// we can't do that until the enclosing set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
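  // Illustrative sketch (assumed hook, not from the original header): the
  // parser registers a callback so Sema can call back into it when a
  // late-parsed template body is finally needed. 'TheParser' and the exact
  // parser entry point are assumptions for illustration:
  //
  //   static void LateParse(void *P, LateParsedTemplate &LPT) {
  //     static_cast<Parser *>(P)->ParseLateTemplatedFuncDef(LPT);
  //   }
  //   S.SetLateTemplateParser(LateParse, /*Cleanup=*/nullptr, &TheParser);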
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
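  // Illustrative sketch (assumed usage, not from the original header): a
  // client collects access/deprecation diagnostics into its own pool and
  // restores the previous state afterwards. 'S' names a Sema instance:
  //
  //   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
  //   Sema::DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
  //   // ... parse or process the construct ...
  //   S.DelayedDiagnostics.popWithoutEmitting(State);
  //   // The caller now decides whether to emit or drop what Pool collected.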
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
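  // Illustrative sketch (assumed usage): enter a record's DeclContext for
  // the duration of a scope; 'S' is a Sema and 'RD' a CXXRecordDecl*
  // supplied by the caller:
  //
  //   {
  //     Sema::ContextRAII SavedCtx(S, RD);
  //     // ... work that must happen inside RD's context ...
  //   } // S.CurContext and the delayed-diagnostics state are restored here.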
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
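  // Illustrative sketch (assumed usage): synthesizing the body of an
  // implicit special member 'Ctor' triggered at 'UseLoc', both hypothetical
  // names for illustration:
  //
  //   SynthesizedFunctionScope Scope(S, Ctor);
  //   Scope.addContextNote(UseLoc);
  //   // ... build and attach the function body ...
  //   // Function scope, evaluation context, and DeclContext unwind on exit.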
/// WeakUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma weak before being declared. Rare. May alias another
  /// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
  llvm::DenseMap<IdentifierInfo *, AsmLabelAttr *> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
  /// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
  /// Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
  /// references to fields, such as the operand of a SIZE operator in
  /// MS-style inline assembly.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
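  // Illustrative examples of the contexts above (a sketch; the exact context
  // chosen for each subexpression is determined by the parser and Sema):
  //
  //   sizeof(e);          // 'e' is in an Unevaluated context.
  //   sizeof(T{e});       // 'e' is in an UnevaluatedList context.
  //   case e:             // 'e' is in a ConstantEvaluated context.
  //   f(e);               // 'e' is in a PotentiallyEvaluated context.
  //   void g(int x = e);  // 'e' is PotentiallyEvaluatedIfUsed.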
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
    /// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
ExprContext(ExprContext) {}
/// Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
  // Contains the locations of the beginning of each unparsed default
  // argument.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
  /// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
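  // Illustrative sketch (assumed usage): saving and restoring the FP options
  // around a compound statement that may change them:
  //
  //   {
  //     Sema::FPContractStateRAII SavedFP(S);
  //     // ... act on a compound statement that may modify S.FPFeatures ...
  //   } // FP options restored here.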
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
  /// Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  /// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
    // source anyway, making the user-defined ~SemaDiagnosticBuilder a safe
    // no-op in that case.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
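  // Illustrative usage (sketch): arguments are streamed into the returned
  // builder, and the diagnostic is emitted when the temporary is destroyed
  // at the end of the full expression:
  //
  //   S.Diag(Loc, diag::err_undeclared_var_use) << Name;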
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
  /// is during parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
  /// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
  /// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
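  // Illustrative sketch (assumed names 'S', 'Loc', and 'Name'): rebuilding
  // 'int(float, double)' from already-instantiated pieces:
  //
  //   SmallVector<QualType, 2> Params = {S.Context.FloatTy, S.Context.DoubleTy};
  //   FunctionProtoType::ExtProtoInfo EPI;
  //   QualType Fn = S.BuildFunctionType(S.Context.IntTy, Params, Loc, Name, EPI);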
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
clang::Module *Module = nullptr;
bool ModuleInterface = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
  /// visible.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M) { return VisibleModules.isVisible(M); }
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
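  // Illustrative usage (sketch): callers typically bail out when the type
  // cannot be completed; any extra arguments are streamed into the
  // diagnostic before the type itself:
  //
  //   if (S.RequireCompleteType(Loc, T, diag::err_typecheck_decl_incomplete_type))
  //     return ExprError();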
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
  /// If AsUnevaluated is false, E is treated as though it occurred in an
  /// evaluated context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
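  // Illustrative sketch of how a parser might consume the result; 'CurScope'
  // and 'Next' (the token after the identifier) are assumed available at the
  // call site:
  //
  //   Sema::NameClassification C =
  //       S.ClassifyName(CurScope, SS, Name, NameLoc, Next,
  //                      /*IsAddressOfOperand=*/false);
  //   switch (C.getKind()) {
  //   case Sema::NC_Type:       /* parse a declaration or cast */ break;
  //   case Sema::NC_Expression: /* continue parsing an expression */ break;
  //   default: break;
  //   }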
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
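  // Illustrative scenario (sketch): given a non-template 'func', the
  // expression below parses 'func' as an ordinary expression; the helpers
  // above let the parser recognize that a template-name was probably
  // intended and point the diagnostic at the '<' and '>' locations:
  //
  //   int func(int);
  //   int y = func<int>(0); // 'func' was probably meant to be a template.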
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
  /// Diagnose when a parameter or the return value of a function or
  /// Objective-C method definition is passed by value and is larger than a
  /// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
Partition, ///< 'module partition X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path);
/// The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// The parser has processed a module import translated from a
  /// \#include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
  /// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, we actually parse the body and reject or error out in case of
/// a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
bool Implicit,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
/// Checks the availability of the function depending on the current function
/// context. Inside an unavailable function, unavailability is ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf ///< Condition in a constexpr if statement.
};
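// For orientation (illustrative client C++ only, not part of Sema), the
// CCEKind contexts correspond to constructs such as:
//
//   constexpr int C = 3;
//   int n = 0;
//   switch (n) { case C: break; }          // CCEK_CaseValue
//   enum class E : int { A = C };          // CCEK_Enumerator
//   template <int N> struct S {};          // CCEK_TemplateArg (as in S<C>)
//   auto *p = new int[C];                  // CCEK_NewExpr (array bound)
//   if constexpr (C > 0) {}                // CCEK_ConstexprIf (C++17)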
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
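// A minimal sketch of a concrete diagnoser (assumptions: SemaRef is a Sema &,
// DiagID is a caller-supplied diagnostic ID, and Loc/E are the location and
// expression being converted; none of these are defined here). A real
// diagnoser would use a dedicated diagnostic per failure mode rather than
// reusing one ID.
//
//   struct SimpleICEDiagnoser : Sema::ICEConvertDiagnoser {
//     unsigned DiagID;
//     explicit SimpleICEDiagnoser(unsigned DiagID)
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false),
//           DiagID(DiagID) {}
//     Sema::SemaDiagnosticBuilder
//     diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) override {
//       return S.Diag(Loc, DiagID) << T;
//     }
//     Sema::SemaDiagnosticBuilder
//     diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) override {
//       return S.Diag(Loc, DiagID) << T;
//     }
//     Sema::SemaDiagnosticBuilder
//     diagnoseExplicitConv(Sema &S, SourceLocation Loc, QualType T,
//                          QualType ConvTy) override {
//       return S.Diag(Loc, DiagID) << T << ConvTy;
//     }
//     Sema::SemaDiagnosticBuilder
//     noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
//                      QualType ConvTy) override {
//       return S.Diag(Conv->getLocation(), DiagID) << ConvTy;
//     }
//     Sema::SemaDiagnosticBuilder
//     diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) override {
//       return S.Diag(Loc, DiagID) << T;
//     }
//     Sema::SemaDiagnosticBuilder
//     noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
//                   QualType ConvTy) override {
//       return S.Diag(Conv->getLocation(), DiagID) << ConvTy;
//     }
//     Sema::SemaDiagnosticBuilder
//     diagnoseConversion(Sema &S, SourceLocation Loc, QualType T,
//                        QualType ConvTy) override {
//       return S.Diag(Loc, DiagID) << T << ConvTy;
//     }
//   };
//
//   SimpleICEDiagnoser Diagnoser(DiagID);
//   ExprResult Converted =
//       SemaRef.PerformContextualImplicitConversion(Loc, E, Diagnoser);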
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all template and non-template overload
// candidates identified by the given expression.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
///
/// \param AllowTopLevelCond Whether to allow the result to be the
/// complete top-level condition.
std::pair<Expr *, std::string>
findFailedBooleanCondition(Expr *Cond, bool AllowTopLevelCond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up any declaration with any name.
LookupAnyName
};
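// Typical unqualified-lookup usage (a sketch; SemaRef, Name, Loc, and
// CurScope are assumed to be supplied by the caller):
//
//   LookupResult R(SemaRef, Name, Loc, Sema::LookupOrdinaryName);
//   if (SemaRef.LookupName(R, CurScope) && R.isSingleResult()) {
//     NamedDecl *D = R.getFoundDecl();
//     // ... use D ...
//   }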
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
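// For reference, illustrative operator forms that these lookup results
// classify (the string literal operator template is a Clang extension):
//
//   unsigned operator""_u(unsigned long long);      // LOLR_Cooked
//   unsigned operator""_r(const char *);            // LOLR_Raw
//   template <char...> unsigned operator""_t();     // LOLR_Template
//   template <typename CharT, CharT...>
//   unsigned operator""_s();                        // LOLR_StringTemplate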
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
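// Example filter (a sketch): accept only rebuilt expressions of pointer
// type; returning ExprError() makes typo correction try the remaining
// combinations of corrections.
//
//   ExprResult Res = CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [](Expr *Candidate) -> ExprResult {
//         if (Candidate->getType()->isPointerType())
//           return Candidate;
//         return ExprError();
//       });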
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param nullabilityLoc The location of the nullability specifier.
///
/// \param isContextSensitive Whether this nullability specifier was
/// written as a context-sensitive keyword (in an Objective-C
/// method) or an Objective-C property attribute, rather than as an
/// underscored type specifier.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability,
SourceLocation nullabilityLoc,
bool isContextSensitive,
bool allowArrayTypes);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its interface
/// declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and the property has a backing ivar, returns that ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one, but not the other, user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// the CheckTheOther parameter is set, it then checks the other kind. If no
/// such method, or only one method, is found, this function returns false;
/// otherwise it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
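// Illustrative usage sketch (hypothetical caller; LBrace, RBrace, and Elts
// are assumed to come from the parser). The RAII object balances
// ActOnStartOfCompoundStmt/ActOnFinishOfCompoundStmt even on early returns:
//   Sema::CompoundScopeRAII BodyScope(S);
//   // ... act on each statement, collecting results into Elts ...
//   return S.ActOnCompoundStmt(LBrace, RBrace, Elts, /*isStmtExpr=*/false);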
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
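// Illustrative usage sketch (hypothetical error path): the function scope
// is popped automatically on failure, and disable() is called once the
// scope should be kept alive past this frame:
//   Sema::FunctionScopeRAII PopOnError(S);
//   if (Failed)
//     return StmtError();      // scope popped by the destructor
//   PopOnError.disable();      // success: keep the function scope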
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
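// Illustrative sketch: the enumerators are combinable bit flags, so an
// individual semantic can be tested with a bitwise AND (hypothetical helper):
//   static bool allowsParameters(Sema::CopyElisionSemanticsKind CESK) {
//     return (CESK & Sema::CES_AllowParameters) != 0;
//   }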
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
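// Illustrative usage sketch (hypothetical caller): the defaulted overload
// above is the common entry point; on success, the type of a reference to
// the capture can be recovered via getCapturedDeclRefType below:
//   if (S.tryCaptureVariable(Var, Loc))   // true means the capture failed
//     return ExprError();
//   QualType RefTy = S.getCapturedDeclRefType(Var, Loc);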
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
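// Illustrative sketch: for __builtin_offsetof(T, a.b[2]) the parser would
// hand the Act/Build functions below three components (locations elided):
//   ".a"  -> isBrackets = false, U.IdentInfo = identifier 'a'
//   ".b"  -> isBrackets = false, U.IdentInfo = identifier 'b'
//   "[2]" -> isBrackets = true,  U.E = the parsed index expression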
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
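// Illustrative sketch (hypothetical caller): handling of an __if_exists
// body is typically driven by a switch over this result:
//   switch (S.CheckMicrosoftIfExistsSymbol(CurScope, SS, NameInfo)) {
//   case Sema::IER_Exists:       /* parse and keep the body */  break;
//   case Sema::IER_DoesNotExist: /* skip the body */            break;
//   case Sema::IER_Dependent:    /* defer to instantiation */   break;
//   case Sema::IER_Error:        return StmtError();
//   }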
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
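// Illustrative usage sketch (CalledMembers is a hypothetical collection of
// the special members a defaulted function would invoke):
//   Sema::ImplicitExceptionSpecification Spec(S);
//   for (const CXXMethodDecl *Called : CalledMembers)
//     Spec.CalledDecl(Loc, Called);
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();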
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
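// Illustrative usage sketch (hypothetical values): 'this' is permitted only
// for the lifetime of the RAII object, after which the previous override
// is restored:
//   {
//     Sema::CXXThisScopeRAII ThisScope(S, Record, /*CXXThisTypeQuals=*/0);
//     // expressions checked here may use 'this' against Record
//   }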
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if an error occurred, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
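// Illustrative sketch (assumed mapping, not stated in this header): a
// '::new' expression would restrict lookup to the global scope, while a
// plain 'new' considers both:
//   AllocationFunctionScope NewScope = UseGlobal ? AFS_Global : AFS_Both;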
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr) {
return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
: SourceLocation());
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
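// Illustrative sketch of the Microsoft '__super' extension handled here:
//
//   struct Base { void f(); };
//   struct Derived : Base {
//     void f() { __super::f(); }  // '__super::' denotes the base class scope
//   };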
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery; in this case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' with
/// ':' are allowed. The bool value pointed to by this parameter is set to
/// 'true' if the identifier is treated as if it were followed by ':', not
/// '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
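// Illustrative sketch: for the qualified name below, this is called once per
// 'identifier::' component, with SS accumulating first 'ns::' and then
// 'ns::inner::' as both input and output:
//
//   namespace ns { namespace inner { extern int x; } }
//   int y = ns::inner::x;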
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
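// Illustrative sketch: this handles the 'apply<T1, T2>::' component of a
// specifier such as
//
//   typename MetaFun::template apply<T1, T2>::type
//
// extending SS, which already holds 'MetaFun::', with the template-id.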
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
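// Illustrative sketch of the save/restore round trip; 'Actions' and
// 'AnnRange' are assumed names for the Sema instance and the annotation
// token's source range:
//
//   void *Ann = Actions.SaveNestedNameSpecifierAnnotation(SS);
//   // ... the annotation token is cached and revisited later ...
//   Actions.RestoreNestedNameSpecifierAnnotation(Ann, AnnRange, SS);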
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
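// Illustrative sketch: while parsing the out-of-line definition below, names
// between the Enter/Exit calls are looked up in X's scope, so 'T' resolves
// to X::T:
//
//   struct X { typedef int T; void f(T); };
//   void X::f(T) {}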
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
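// Illustrative sketch of the rule cited above: 'a' in the initializer of the
// static data member X::b is looked up in the scope of X:
//
//   struct X { static const int a = 1; static int b; };
//   int X::b = a;  // 'a' resolves to X::a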
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
bool IsConstexprSpecified);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture, applying any
/// implicit conversions such as an lvalue-to-rvalue conversion if the
/// initializer is not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
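// Illustrative sketch ('compute' is a hypothetical initializer): for an
// init-capture such as
//
//   auto l = [n = compute()] { return n; };
//
// the initializer 'compute()' is analyzed here; since 'n' is a by-copy
// capture, an lvalue-to-rvalue conversion may be applied to the initializer.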
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
IdentifierInfo *Id,
bool DirectInit, Expr *&Init);
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
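// Illustrative sketch: the conversion whose "body" is filled in above is the
// one exercised when a captureless lambda converts to a function pointer:
//
//   auto l = [](int x) { return x + 1; };
//   int (*fp)(int) = l;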
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. The type of the expression will be
/// "NSNumber *", or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
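// Illustrative sketch: constructing a polymorphic object odr-uses its
// vtable, so the use is recorded here and later materialized by
// DefineUsedVTables():
//
//   struct B { virtual void f(); };
//   void g() { B b; }  // marks B's vtable as used at this location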
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of the base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
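// Illustrative sketch of the variadic overload above; the diagnostic ID is a
// placeholder:
//
//   if (RequireNonAbstractType(Loc, ReturnType,
//                              diag::err_abstract_type_in_decl,
//                              AbstractReturnType))
//     return true;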
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation());
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false,
bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
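// Illustrative sketch:
//
//   template<typename T, int N> void f(T (&)[N]);
//   int a[3];
//   f(a);       // T, N deduced: CTAK_Deduced / CTAK_DeducedFromArrayBound
//   f<int>(a);  // T as written in code: CTAK_Specified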
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
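// Illustrative sketch: an unexpanded pack in a data member type would be
// diagnosed with UPPC_DataMemberType:
//
//   template<typename ...Ts> struct X {
//     Ts t;  // error: unexpanded parameter pack 'Ts'
//   };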
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
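// Illustrative sketch ('h' is a hypothetical callee): the expression form
// handles the 'args...' in
//
//   template<typename ...Ts> void g(Ts ...args) { h(args...); }
//
// where the pattern of the pack expansion is 'args'.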
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
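// Illustrative sketch: 'Ts' and 'Us' below are expanded together, so this
// check reports an error if they are instantiated with packs of different
// lengths:
//
//   template<typename ...Ts, typename ...Us>
//   void f(std::pair<Ts, Us> ...ps);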
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
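///
/// Since \c TDK_Success is zero, a result is conveniently testable as a
/// boolean (an illustrative sketch; \c FT, \c Args, \c Spec, and \c Info
/// are placeholders):
///
/// \code
///   if (TemplateDeductionResult TDK =
///           DeduceTemplateArguments(FT, &Args, Spec, Info))
///     return TDK; // any nonzero value describes a deduction failure
/// \endcode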
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
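/// Retrieve the most specialized entry from the set of function template
/// specializations [SBegin, SEnd), using \p FailedCandidates and the given
/// diagnostics to report an empty or ambiguous result when \p Complain is
/// true.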
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
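/// Mark in \p Used which template parameters at the given \p Depth are
/// referenced by \p TemplateArgs; if \p OnlyDeduced is true, only
/// parameters appearing in deduced contexts are marked.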
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
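/// Retrieve the set of template argument lists that should be used to
/// instantiate the definition of the declaration \p D, gathering arguments
/// from its enclosing template contexts.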
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing.
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to instantiate it if it were not already).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
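///
/// For example, when expanding the pattern \c f(Ts)... with \c Ts bound to
/// \c int and \c float, the index is 0 while producing \c f(int) and 1
/// while producing \c f(float).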
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
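///
/// Illustrative use:
///
/// \code
///   ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
///   // substitutions in this scope use the I'th element of each pack
/// \endcode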
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and the object evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
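///
/// Illustrative use:
///
/// \code
///   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
///   if (Inst.isInvalid())
///     return true; // depth limit exceeded; already diagnosed
/// \endcode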
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
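///
/// Illustrative use:
///
/// \code
///   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
///     // in a SFINAE context; *Info, if non-null, collects suppressed
///     // diagnostics
///   }
/// \endcode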
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
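///
/// Illustrative use:
///
/// \code
///   SFINAETrap Trap(*this);
///   // ...perform substitution or other checking here...
///   if (Trap.hasErrorOccurred())
///     return TDK_SubstitutionFailure;
/// \endcode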
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
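/// RAII scope that, when enabled, saves the pending implicit
/// instantiations and vtable uses so that the instantiations triggered
/// within the scope can be performed together via \c perform(); the saved
/// sets are restored on destruction.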
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
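/// RAII scope that swaps out \c PendingLocalImplicitInstantiations so the
/// instantiations required by the current local scope can be performed via
/// \c perform() before the scope exits.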
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
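///
/// Illustrative use (\c EPI standing for a
/// \c FunctionProtoType::ExtProtoInfo being built up):
///
/// \code
///   ExtParameterInfoBuilder ExtParamInfos;
///   ExtParamInfos.set(I, Info);  // only for parameters that need one
///   EPI.ExtParameterInfos = ExtParamInfos.getPointerOrNull(NumParams);
/// \endcode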
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
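/// Perform substitution of the given template arguments into the type
/// \p T (carrying source-location information), using \p Entity to name
/// the entity being substituted in diagnostics.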
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on \#pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// Called on well-formed '\#pragma clang attribute push'.
void ActOnPragmaAttributePush(ParsedAttr &Attribute, SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
void AddNSConsumedAttr(SourceRange AttrRange, Decl *D,
unsigned SpellingListIndex, bool isNSConsumed,
bool isTemplateInstantiation);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
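// An illustrative (not compilable as-is) coroutine, assuming a coroutine
// return type 'task<int>' whose promise_type satisfies the Coroutines TS
// requirements:
//
//   task<int> f(task<int> t) {
//     int v = co_await t;   // ActOnCoawaitExpr
//     co_yield v;           // ActOnCoyieldExpr
//     co_return v;          // ActOnCoreturnStmt
//   }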
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if the declaration is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
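// For example, in OpenCL the 'double' type requires the cl_khr_fp64
// extension:
//
//   #pragma OPENCL EXTENSION cl_khr_fp64 : enable
//   double d;   // OK while the extension is enabled
//   #pragma OPENCL EXTENSION cl_khr_fp64 : disable
//   double e;   // diagnosed: required extension is disabled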
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Set to true inside '#pragma omp declare target' region.
bool IsInOpenMPDeclareTargetContext = false;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction, etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D) const;
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// Check if the specified variable is used in a 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by the 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on a correct id-expression from the '#pragma omp threadprivate'
/// directive.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
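// For example:
//
//   static int counter;
//   #pragma omp threadprivate(counter)   // each thread gets its own copy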
/// Check if the specified type is allowed to be used in the 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize the declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish the current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
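// For example, 'omp_in'/'omp_out' feed the combiner callbacks above and
// 'omp_priv' the initializer callbacks:
//
//   #pragma omp declare reduction(mymax : int : omp_out = omp_in > omp_out ? omp_in : omp_out) initializer(omp_priv = INT_MIN)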
/// Called on the start of a target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
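// For example:
//
//   #pragma omp declare target
//   int dev_data;    // mapped to the device
//   void dev_fn();   // also compiled for the device
//   #pragma omp end declare target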
/// Called on a correct id-expression from the '#pragma omp declare target'
/// directive.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return IsInOpenMPDeclareTargetContext;
}
/// Return true if inside an OpenMP target execution directive.
bool isInOpenMPTargetExecutionDirective() const;
/// Return true if (un)supported features for the current target should be
/// diagnosed if OpenMP (offloading) is enabled.
bool shouldDiagnoseTargetSupportFromOpenMP() const {
return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() ||
isInOpenMPTargetExecutionDirective();
}
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
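// For example (illustrative), the statement associated with the directive
// is passed in as 'AStmt':
//
//   #pragma omp parallel num_threads(4)
//   { work(); }   // executed by a team of four threads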
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
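// For example, attached to the immediately following function (parameter
// names are illustrative):
//
//   #pragma omp declare simd simdlen(8) uniform(p) linear(i : 1)
//   float load(float *p, int i);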
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
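// For example (illustrative loop):
//
//   #pragma omp for schedule(nonmonotonic : dynamic, 4)
//   for (int i = 0; i < n; ++i) body(i);
//
// Here M1 is 'nonmonotonic', Kind is 'dynamic', and ChunkSize is '4'.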
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation DepLinMapLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
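// For example (illustrative array section):
//
//   #pragma omp target map(always, tofrom : a[0:n])
//   { a[0] += 1; }
//
// Here MapTypeModifier is 'always' and MapType is 'tofrom'.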
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *ActOnOpenMPToClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
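// For example:
//
//   double d = 0;
//   int a = d;                    // CCK_ImplicitConversion
//   int b = (int)d;               // CCK_CStyleCast
//   int c = int(d);               // CCK_FunctionalCast
//   int e = static_cast<int>(d);  // CCK_OtherCast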
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collect argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If the operands aren't both arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// that point to integer types which differ in signedness, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// const/volatile/restrict qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ, e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
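// Illustrative C snippets for a few of these results:
//
//   int i; void *p = 0;
//   i = p;    // PointerToInt (accepted as an extension)
//   p = i;    // IntToPointer (accepted as an extension)
//   char **c = 0; const char **cc;
//   cc = c;   // IncompatibleNestedPointerQualifiers (extension)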
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
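// Illustrative comparisons of cv1 T1 against cv2 T2, given
// 'struct B {}; struct D : B {};':
//
//   T1 = const B, T2 = D        -> Ref_Compatible (via derived-to-base)
//   T1 = B,       T2 = const D  -> Ref_Related (binding would drop 'const')
//   T1 = int,     T2 = D        -> Ref_Incompatible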
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expr.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
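// For example (illustrative; 'ptr', 'T', and 'n' are placeholders):
//
//   if (ptr) {}                        // ConditionKind::Boolean
//   if constexpr (sizeof(T) == 4) {}   // ConditionKind::ConstexprIf
//   switch (n) {}                      // ConditionKind::Switch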
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return The converted condition expression, or an invalid result if
/// there were any errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment to be used as a condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns the converted expression
/// on success, or an error on failure.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns the (possibly converted) width expression on success, or an error
/// on failure. Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
CUDADeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
CUDAKnownEmittedFns;
/// A partial call graph maintained during CUDA compilation to support
/// deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to CUDAKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
CUDACallGraph;
/// Diagnostic builder for CUDA errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class CUDADiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
CUDADiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
~CUDADiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (CUDADiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a CUDADiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const CUDADiagBuilder &operator<<(const CUDADiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiag.hasValue())
*Diag.PartialDiag << Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<PartialDiagnostic> PartialDiag;
};
/// Creates a CUDADiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
CUDADiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a CUDADiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
CUDADiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies the relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller the function which needs the address of \p Callee;
/// nullptr in case of global context.
/// \param Callee the target function.
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds the function in \p Matches with the highest calling priority
/// from the \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p VD satisfy CUDA restrictions. In
// case of error, emits an appropriate diagnostic and invalidates \p VD.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters it expects.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
private:
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDefaultedMemberExceptionSpecs.empty() &&
"there shouldn't be any pending delayed defaulted member "
"exception specs");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedExceptionSpecChecks) SavedExceptionSpecChecks;
decltype(DelayedDefaultedMemberExceptionSpecs)
SavedDefaultedMemberExceptionSpecs;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedExceptionSpecChecks.swap(S.DelayedExceptionSpecChecks);
SavedDefaultedMemberExceptionSpecs.swap(
S.DelayedDefaultedMemberExceptionSpecs);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it from the set.
/// This is used when we do not want to diagnose such misaligned access
/// (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
gctpc_ll2xy.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "proj.h"
#include "grb2.h"
#include "wgrib2.h"
#include "fnlist.h"
/* gctpc_ll2xy.c interface routines to the gctpc library
2/2012 Public Domain Wesley Ebisuzaki
gctpc_ll2xy_init, gctpc_ll2xy, gctpc_ll2i: convert lon-lat to grid (x,y) or grid index
lat-lon
mercator
polar stereographic
lambert conformal
10/2015 lambert azimuthal equal area
*/
/* M_PI, M_PI_2, M_PI_4, and M_SQRT2 are not ANSI C but are commonly defined */
/* values from GNU C library version of math.h copyright Free Software Foundation, Inc. */
#ifndef M_PI
#define M_PI 3.14159265358979323846 /* pi */
#endif
#ifndef M_PI_2
#define M_PI_2 1.57079632679489661923 /* pi/2 */
#endif
#ifndef M_PI_4
#define M_PI_4 0.78539816339744830962 /* pi/4 */
#endif
#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */
#endif
/*
* given vectors of lon-lat
* return i-j vectors
*
* step 1: find (x,y) of 1st point
* step 2: find (x,y) of input point
*
* input:
* **sec = grib sections of grid
* **grid_lon = longitudes of grid
* **grid_lat = latitudes of grid
* n = number of points to convert
* *lon = longitudes of points to convert to (i,j)
* *lat = latitudes of points to convert to (i,j)
*
* output:
* x[n] coordinates on grid
* y[n]
*
* assumption: grid_lon, grid_lat is in wesn order
*
* to use:
*
* setup
* int gctpc_ll2xy_init(unsigned char **sec, double *grid_lon, double *grid_lat);
*
* get (x,y)
* int gctpc_ll2xy(int n, double *lon, double *lat, double *x, double *y);
* get nearest neighbor
* int gctpc_ll2i(int n, double *lon, double *lat, unsigned int *i);
*
*/
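/*
 * minimal usage sketch (illustrative only, not part of this file's API;
 * assumes sec, grid_lon and grid_lat were already filled from a decoded
 * GRIB2 record):
 *
 *   double lon[2] = {280.0, 285.5}, lat[2] = {35.0, 40.25};
 *   double x[2], y[2];
 *   unsigned int ipnt[2];
 *   if (gctpc_ll2xy_init(sec, grid_lon, grid_lat) == 0) {
 *       gctpc_ll2xy(2, lon, lat, x, y);    // fractional grid coordinates
 *       gctpc_ll2i(2, lon, lat, ipnt);     // 1-based nearest point, 0 = off grid
 *   }
 */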
static long int (*forward_fn)();
static double dx, dy, inv_dx, inv_dy, x_0, y_0, x00, xN;
static unsigned int gdt;
static int nx, ny;
int gctpc_ll2xy_init(unsigned char **sec, double *grid_lon, double *grid_lat) {
unsigned char *gds;
double r_maj; /* major axis */
double r_min; /* minor axis */
double lat1; /* first standard parallel */
double lat2; /* second standard parallel */
double c_lon; /* center longitude */
double c_lat; /* center latitude */
double false_east; /* x offset in meters */
double false_north;
int nres, nscan;
unsigned int npnts;
long long_i;
double rlon, rlat;
gdt = code_table_3_1(sec);
gds = sec[3];
/* only process certain grids */
forward_fn = NULL;
if (grid_lon == NULL || grid_lat == NULL) return 1;
if (gdt != 0 && !(gdt == 10 && GDS_Mercator_ori_angle(gds) == 0.0) && gdt != 20 && gdt != 30 && gdt != 140) return 1;
get_nxny(sec, &nx, &ny, &npnts, &nres, &nscan);
if (nx == -1 || ny == -1 || nx*ny != npnts) return 1;
/* only process certain grids */
x_0 = y_0 = x00 = xN = inv_dx = inv_dy = 0.0;
if (gdt == 0) { /* lat-lon grid */
dx = grid_lon[1] - grid_lon[0];
dy = grid_lat[nx] - grid_lat[0];
inv_dx = 1.0 / dx;
inv_dy = 1.0 / dy;
x_0 = grid_lon[0];
x00 = grid_lon[0] - 0.5*dx;
xN = grid_lon[nx-1] + 0.5*dx;
y_0 = grid_lat[0];
return 0;
}
else if (gdt == 10 && (GDS_Mercator_ori_angle(gds) == 0.0) ) { // mercator no rotation
/* get earth axis */
axes_earth(sec, &r_maj, &r_min);
dx = GDS_Mercator_dx(gds);
dy = GDS_Mercator_dy(gds);
inv_dx = 1.0 / dx;
inv_dy = 1.0 / dy;
/* central point */
c_lon = 0.0;
c_lat = GDS_Mercator_latD(gds) * (M_PI/180.0);
/* find the easting and northing of the 1st grid point */
false_east = false_north = 0.0;
long_i = merforint(r_maj,r_min,c_lon,c_lat,false_east,false_north);
if (long_i) fatal_error_i("gctpc_ll2xy_init merforint: return %ld", long_i);
rlat = GDS_Mercator_lat1(gds) * (M_PI/180.0);
rlon = GDS_Mercator_lon1(gds) * (M_PI/180.0);
long_i = merfor(rlon, rlat, &x_0, &y_0);
x00 = x_0 - 0.5*dx;
xN = x_0 + (nx-0.5)*dx;
forward_fn = &merfor;
}
else if (gdt == 20) { // polar stereographic
/* get earth axis */
axes_earth(sec, &r_maj, &r_min);
dy = GDS_Polar_dy(gds);
dx = GDS_Polar_dx(gds);
inv_dx = 1.0 / dx;
inv_dy = 1.0 / dy;
/* central point */
c_lon = GDS_Polar_lov(gds) * (M_PI/180.0);
c_lat = GDS_Polar_lad(gds) * (M_PI/180.0);
/* find the easting and northing of the 1st grid point */
false_east = false_north = 0.0;
long_i = psforint(r_maj,r_min,c_lon,c_lat,false_east,false_north);
if (long_i) fatal_error_i("gctpc_ll2xy_init psforint: return %ld", long_i);
rlon = grid_lon[0] * (M_PI/180.0);
rlat = grid_lat[0] * (M_PI/180.0);
long_i = psfor(rlon, rlat, &x_0, &y_0);
x00 = x_0 - 0.5*dx;
xN = x_0 + (nx-0.5)*dx;
forward_fn = &psfor;
}
else if (gdt == 30) { // lambert conformal conic
/* get earth axis */
axes_earth(sec, &r_maj, &r_min);
dx = GDS_Lambert_dx(gds);
dy = GDS_Lambert_dy(gds);
inv_dx = 1.0 / dx;
inv_dy = 1.0 / dy;
/* latitudes of tangent/intersection */
lat1 = GDS_Lambert_Latin1(gds) * (M_PI/180.0);
lat2 = GDS_Lambert_Latin2(gds) * (M_PI/180.0);
/* central point */
c_lon = GDS_Lambert_Lov(gds) * (M_PI/180.0);
c_lat = GDS_Lambert_LatD(gds) * (M_PI/180.0);
/* find the easting and northing of the 1st grid point */
false_east = false_north = 0.0;
long_i = lamccforint(r_maj,r_min,lat1,lat2,c_lon,c_lat,false_east,false_north);
if (long_i) fatal_error_i("gctpc_ll2xy_init lamccforint: return %ld", long_i);
rlon = grid_lon[0] * (M_PI/180.0);
rlat = grid_lat[0] * (M_PI/180.0);
long_i = lamccfor(rlon, rlat, &x_0, &y_0);
x00 = x_0 - 0.5*dx;
xN = x_0 + (nx-0.5)*dx;
forward_fn = &lamccfor;
}
else if (gdt == 140) { // lambert azimuthal equal area
/* get earth axis */
axes_earth(sec, &r_maj, &r_min);
r_maj = 0.5 * (r_maj + r_min);
dx = GDS_Lambert_Az_dx(gds);
dy = GDS_Lambert_Az_dy(gds);
inv_dx = 1.0 / dx;
inv_dy = 1.0 / dy;
/* central point */
c_lon = GDS_Lambert_Az_Cen_Lon(gds) * (M_PI/180.0);
c_lat = GDS_Lambert_Az_Std_Par(gds) * (M_PI/180.0);
false_east = false_north = 0.0;
long_i = lamazforint(r_maj,c_lon,c_lat,false_east,false_north);
if (long_i) fatal_error_i("gctpc_ll2xy_init lamazforint: return %ld", long_i);
rlon = grid_lon[0] * (M_PI/180.0);
rlat = grid_lat[0] * (M_PI/180.0);
long_i = lamazfor(rlon, rlat, &x_0, &y_0);
x00 = x_0 - 0.5*dx;
xN = x_0 + (nx-0.5)*dx;
forward_fn = &lamazfor;
}
return forward_fn != NULL ? 0 : 1;
}
int gctpc_ll2xy(int n, double *lon, double *lat, double *x, double *y) {
int i;
double rlon, rlat;
if (gdt == 0) { // lat-lon
// #pragma omp parallel for schedule(static) private(i,rlon,rlat)
for (i = 0; i < n; i++) {
rlon = lon[i];
if (rlon > xN) rlon -= 360.0;
if (rlon < x00) rlon += 360.0;
rlat = lat[i];
x[i] = (rlon - x_0) * inv_dx;
y[i] = (rlat - y_0) * inv_dy;
}
return 0;
}
if (forward_fn == NULL) return 1;
// #pragma omp parallel for private(i,rlon,rlat)
for (i = 0; i < n; i++) {
rlon = lon[i];
if (rlon > xN) rlon -= 360.0;
if (rlon < x00) rlon += 360.0;
rlon *= (M_PI/180.0);
rlat = lat[i] * (M_PI/180.0);
forward_fn(rlon, rlat, x+i, y+i);
x[i] = (x[i] - x_0)*inv_dx;
y[i] = (y[i] - y_0)*inv_dy;
}
return 0;
}
/* ipnt[] == 0 for out of bounds */
int gctpc_ll2i(int n, double *lon, double *lat, unsigned int *ipnt) {
int i;
unsigned int ix, iy;
double rlon, rlat, x, y;
if (gdt == 0) { // lat-lon
// #pragma omp parallel for schedule(static) private(i,rlon,rlat,ix,iy, x, y)
for (i = 0; i < n; i++) {
rlon = lon[i];
if (rlon > xN) rlon -= 360.0;
if (rlon < x00) rlon += 360.0;
rlat = lat[i];
ix = x = floor((rlon - x_0) * inv_dx + 0.5);
iy = y = floor((rlat - y_0) * inv_dy + 0.5);
if (x < 0 || x >= nx || y < 0 || y >= ny) {
ipnt[i] = 0;
}
else {
ipnt[i] = ix + nx*iy + 1;
}
}
return 0;
}
if (forward_fn == NULL) return 1;
// #pragma omp parallel for schedule(static) private(i,rlon,rlat,ix,iy, x, y)
for (i = 0; i < n; i++) {
rlon = lon[i];
if (rlon > xN) rlon -= 360.0;
if (rlon < x00) rlon += 360.0;
rlon *= (M_PI/180.0);
rlat = lat[i] * (M_PI/180.0);
forward_fn(rlon, rlat, &x, &y);
ix = x = floor((x - x_0)*inv_dx + 0.5);
iy = y = floor((y - y_0)*inv_dy + 0.5);
if (x < 0 || x >= nx || y < 0 || y >= ny) {
ipnt[i] = 0;
}
else {
ipnt[i] = ix + nx*iy + 1;
}
}
return 0;
}
|
mic_iloop.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef VECTOR_SIZE
#define VECTOR_SIZE 100
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
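/* count_sort: O(n^2) enumeration ("count") sort. For each element a[i],
 * count how many elements must precede it in sorted order, breaking ties
 * by index so the sort is stable, then place a[i] at that rank in temp.
 * Each outer iteration is independent, which is what makes the loop safe
 * to parallelize and offload; the copy back from temp is parallel too. */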
void count_sort(int a[], int n) {
int i, j, count;
int *temp = malloc(n*sizeof(int));
#pragma offload target(mic) in(a:length(n)) out(temp:length(n))
{
#pragma omp parallel for default(none) private(i, j, count) shared(a, temp, n)
for (i = 0; i < n; i++) {
count = 0;
for (j = 0; j < n; j++) {
if (a[j] < a[i])
count++;
else if (a[j] == a[i] && j < i)
count++;
}
temp[count] = a[i];
}
}
#ifndef _OPENMP
memcpy(a, temp, n*sizeof(int));
#else
#pragma omp parallel
{
int thread_count = omp_get_num_threads();
int local_n = n / thread_count;
/* work-share the copy across the enclosing parallel region's team
(a nested "parallel for" here would spawn a new team per thread) */
#pragma omp for
for (int i = 0; i < thread_count; i++) {
int start = i * local_n;
int nelem = local_n;
if (i == thread_count - 1) {
nelem = n - start;
}
memcpy(a + start, temp + start, nelem * sizeof(int));
}
}
#endif
free(temp);
} /* Count sort */
int init_vector(int vect[]) {
for (int i = 0; i < VECTOR_SIZE; ++i ) {
vect[i] = rand() % 100;
}
return 0;
}
int main(int argc, char** argv) {
int vector[VECTOR_SIZE];
init_vector(vector);
count_sort(vector, VECTOR_SIZE);
return 0;
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
/* default problem size, used when command-line arguments are omitted */
Nx = Ny = Nz = 34;
Nt = 16;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 8;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
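/* Structure of the PLUTO/CLooG-generated nest below (inferred from the
 * bounds, so treat it as a reading aid): t1 walks time-tile wavefronts,
 * t2..t4 walk time-skewed space tiles in z/y/x (cf. tile_size[] above),
 * t5 is the time step within a tile, t6..t8 are the intra-tile z/y/x
 * loops, and the max/min expressions clip each tile to the domain. */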
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(32*t2-Nz-4,8)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(16*t1+Ny+29,8)),floord(32*t2+Ny+28,8)),floord(32*t1-32*t2+Nz+Ny+27,8));t3++) {
for (t4=max(max(max(0,ceild(t1-15,16)),ceild(32*t2-Nz-252,256)),ceild(8*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(16*t1+Nx+29,256)),floord(32*t2+Nx+28,256)),floord(8*t3+Nx+4,256)),floord(32*t1-32*t2+Nz+Nx+27,256));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),8*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),8*t3+6),256*t4+254),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(256*t4,t5+1);
ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free of the allocated arrays is left commented out: it caused performance
// degradation in the timed runs, and the OS reclaims the memory at exit.
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
convolution_winograd_transform_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd63_transform_input_pack4_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
const int w = bottom_blob.w;
const int h = bottom_blob.h;
const int inch = bottom_blob.c;
const int w_tiles = (w - 2) / 6;
const int h_tiles = (h - 2) / 6;
const int tiles = w_tiles * h_tiles;
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
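// The 8x8 input transform is applied separably: the first m-loop below
// transforms the 8 rows of each tile, staging the results transposed in
// tmp[8][8][4]; the second m-loop applies the same 1-D transform to the
// columns of tmp and scatters the 64 transformed coefficients of the tile
// into bottom_blob_tm.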
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
#ifdef _MSC_VER
__declspec(align(16))
#else
__attribute__((aligned(16)))
#endif
float tmp[8][8][4];
__m128 _v5_25 = _mm_set1_ps(5.25f);
__m128 _vm4_25 = _mm_set1_ps(-4.25f);
__m128 _vm1_25 = _mm_set1_ps(-1.25f);
__m128 _v0_25 = _mm_set1_ps(0.25f);
__m128 _vm2_5 = _mm_set1_ps(-2.5f);
__m128 _v0_5 = _mm_set1_ps(0.5f);
__m128 _v2 = _mm_set1_ps(2.f);
__m128 _v4 = _mm_set1_ps(4.f);
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* r0 = img0.row(i * 6) + (j * 6) * 4;
for (int m = 0; m < 8; m++)
{
__m128 _r00 = _mm_load_ps(r0);
__m128 _r01 = _mm_load_ps(r0 + 4);
__m128 _r02 = _mm_load_ps(r0 + 4 * 2);
__m128 _r03 = _mm_load_ps(r0 + 4 * 3);
__m128 _r04 = _mm_load_ps(r0 + 4 * 4);
__m128 _r05 = _mm_load_ps(r0 + 4 * 5);
__m128 _r06 = _mm_load_ps(r0 + 4 * 6);
__m128 _r07 = _mm_load_ps(r0 + 4 * 7);
__m128 _tmp0m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r04, _r02), _mm_sub_ps(_r00, _r06));
__m128 _tmp7m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r03, _r05), _mm_sub_ps(_r07, _r01));
_mm_store_ps(tmp[0][m], _tmp0m);
_mm_store_ps(tmp[7][m], _tmp7m);
__m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _r04, _mm_add_ps(_r02, _r06));
__m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _r03, _mm_add_ps(_r01, _r05));
__m128 _tmp1m = _mm_add_ps(_tmp12a, _tmp12b);
__m128 _tmp2m = _mm_sub_ps(_tmp12a, _tmp12b);
_mm_store_ps(tmp[1][m], _tmp1m);
_mm_store_ps(tmp[2][m], _tmp2m);
__m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _r04, _mm_comp_fmadd_ps(_v0_25, _r02, _r06));
__m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v0_5)));
__m128 _tmp3m = _mm_add_ps(_tmp34a, _tmp34b);
__m128 _tmp4m = _mm_sub_ps(_tmp34a, _tmp34b);
_mm_store_ps(tmp[3][m], _tmp3m);
_mm_store_ps(tmp[4][m], _tmp4m);
__m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _r04, _r02), _r06);
__m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v2)));
__m128 _tmp5m = _mm_add_ps(_tmp56a, _tmp56b);
__m128 _tmp6m = _mm_sub_ps(_tmp56a, _tmp56b);
_mm_store_ps(tmp[5][m], _tmp5m);
_mm_store_ps(tmp[6][m], _tmp6m);
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4;
float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;
float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6;
float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7;
for (int m = 0; m < 8; m++)
{
__m128 _tmp00 = _mm_load_ps(tmp[m][0]);
__m128 _tmp01 = _mm_load_ps(tmp[m][1]);
__m128 _tmp02 = _mm_load_ps(tmp[m][2]);
__m128 _tmp03 = _mm_load_ps(tmp[m][3]);
__m128 _tmp04 = _mm_load_ps(tmp[m][4]);
__m128 _tmp05 = _mm_load_ps(tmp[m][5]);
__m128 _tmp06 = _mm_load_ps(tmp[m][6]);
__m128 _tmp07 = _mm_load_ps(tmp[m][7]);
__m128 _r0tm0 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp04, _tmp02), _mm_sub_ps(_tmp00, _tmp06));
__m128 _r0tm7 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp03, _tmp05), _mm_sub_ps(_tmp07, _tmp01));
__m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _tmp04, _mm_add_ps(_tmp02, _tmp06));
__m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _tmp03, _mm_add_ps(_tmp01, _tmp05));
__m128 _r0tm1 = _mm_add_ps(_tmp12a, _tmp12b);
__m128 _r0tm2 = _mm_sub_ps(_tmp12a, _tmp12b);
__m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _tmp04, _mm_comp_fmadd_ps(_v0_25, _tmp02, _tmp06));
__m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v0_5)));
__m128 _r0tm3 = _mm_add_ps(_tmp34a, _tmp34b);
__m128 _r0tm4 = _mm_sub_ps(_tmp34a, _tmp34b);
__m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _tmp04, _tmp02), _tmp06);
__m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v2)));
__m128 _r0tm5 = _mm_add_ps(_tmp56a, _tmp56b);
__m128 _r0tm6 = _mm_sub_ps(_tmp56a, _tmp56b);
_mm_store_ps(r0_tm_0, _r0tm0);
_mm_store_ps(r0_tm_1, _r0tm1);
_mm_store_ps(r0_tm_2, _r0tm2);
_mm_store_ps(r0_tm_3, _r0tm3);
_mm_store_ps(r0_tm_4, _r0tm4);
_mm_store_ps(r0_tm_5, _r0tm5);
_mm_store_ps(r0_tm_6, _r0tm6);
_mm_store_ps(r0_tm_7, _r0tm7);
r0_tm_0 += tiles * 4 * 8;
r0_tm_1 += tiles * 4 * 8;
r0_tm_2 += tiles * 4 * 8;
r0_tm_3 += tiles * 4 * 8;
r0_tm_4 += tiles * 4 * 8;
r0_tm_5 += tiles * 4 * 8;
r0_tm_6 += tiles * 4 * 8;
r0_tm_7 += tiles * 4 * 8;
}
}
}
}
}
static void conv3x3s1_winograd63_transform_output_pack4_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
const int w_tiles = outw / 6;
const int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
const float* biasptr = bias;
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 = (r1 + r2) + (r3 + r4) * 16 + (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32 + (r5 - r6)
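    //
    // A minimal scalar sketch of the 1-D output transform above for one lane
    // (hypothetical reference, not part of the original file):
    //   void winograd63_output_1d(const float r[8], float out[6])
    //   {
    //       float a = r[1] + r[2], sa = r[1] - r[2];
    //       float b = r[3] + r[4], sb = r[3] - r[4];
    //       float c = r[5] + r[6], sc = r[5] - r[6];
    //       out[0] = r[0] + a + b + c * 32.f;
    //       out[1] = sa + sb * 2.f + sc * 16.f;
    //       out[2] = a + b * 4.f + c * 8.f;
    //       out[3] = sa + sb * 8.f + sc * 4.f;
    //       out[4] = a + b * 16.f + c * 2.f;
    //       out[5] = r[7] + sa + sb * 32.f + sc;
    //   }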
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob.channel(p);
__m128 _bias0 = biasptr ? _mm_loadu_ps(biasptr + p * 4) : _mm_setzero_ps();
#ifdef _MSC_VER
__declspec(align(16))
#else
__attribute__((aligned(16)))
#endif
float tmp[6][8][4];
__m128 _v32 = _mm_set1_ps(32.f);
__m128 _v16 = _mm_set1_ps(16.f);
__m128 _v8 = _mm_set1_ps(8.f);
__m128 _v4 = _mm_set1_ps(4.f);
__m128 _v2 = _mm_set1_ps(2.f);
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5;
const float* output0_tm_6 = output0_tm_0 + tiles * 4 * 6;
const float* output0_tm_7 = output0_tm_0 + tiles * 4 * 7;
float* output0 = out0.row(i * 6) + (j * 6) * 4;
for (int m = 0; m < 8; m++)
{
__m128 _out0tm0 = _mm_load_ps(output0_tm_0);
__m128 _out0tm1 = _mm_load_ps(output0_tm_1);
__m128 _out0tm2 = _mm_load_ps(output0_tm_2);
__m128 _out0tm3 = _mm_load_ps(output0_tm_3);
__m128 _out0tm4 = _mm_load_ps(output0_tm_4);
__m128 _out0tm5 = _mm_load_ps(output0_tm_5);
__m128 _out0tm6 = _mm_load_ps(output0_tm_6);
__m128 _out0tm7 = _mm_load_ps(output0_tm_7);
__m128 _tmp024a = _mm_add_ps(_out0tm1, _out0tm2);
__m128 _tmp135a = _mm_sub_ps(_out0tm1, _out0tm2);
__m128 _tmp024b = _mm_add_ps(_out0tm3, _out0tm4);
__m128 _tmp135b = _mm_sub_ps(_out0tm3, _out0tm4);
__m128 _tmp024c = _mm_add_ps(_out0tm5, _out0tm6);
__m128 _tmp135c = _mm_sub_ps(_out0tm5, _out0tm6);
__m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b));
__m128 _tmp2m = _mm_comp_fmadd_ps(_v8, _tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a));
__m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, _tmp024a));
_mm_store_ps(tmp[0][m], _tmp0m);
_mm_store_ps(tmp[2][m], _tmp2m);
_mm_store_ps(tmp[4][m], _tmp4m);
__m128 _tmp1m = _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a));
__m128 _tmp3m = _mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a));
__m128 _tmp5m = _mm_add_ps(_mm_add_ps(_out0tm7, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c));
_mm_store_ps(tmp[1][m], _tmp1m);
_mm_store_ps(tmp[3][m], _tmp3m);
_mm_store_ps(tmp[5][m], _tmp5m);
output0_tm_0 += tiles * 4 * 8;
output0_tm_1 += tiles * 4 * 8;
output0_tm_2 += tiles * 4 * 8;
output0_tm_3 += tiles * 4 * 8;
output0_tm_4 += tiles * 4 * 8;
output0_tm_5 += tiles * 4 * 8;
output0_tm_6 += tiles * 4 * 8;
output0_tm_7 += tiles * 4 * 8;
}
for (int m = 0; m < 6; m++)
{
__m128 _tmp00 = _mm_load_ps(tmp[m][0]);
__m128 _tmp01 = _mm_load_ps(tmp[m][1]);
__m128 _tmp02 = _mm_load_ps(tmp[m][2]);
__m128 _tmp03 = _mm_load_ps(tmp[m][3]);
__m128 _tmp04 = _mm_load_ps(tmp[m][4]);
__m128 _tmp05 = _mm_load_ps(tmp[m][5]);
__m128 _tmp06 = _mm_load_ps(tmp[m][6]);
__m128 _tmp07 = _mm_load_ps(tmp[m][7]);
__m128 _tmp024a = _mm_add_ps(_tmp01, _tmp02);
__m128 _tmp135a = _mm_sub_ps(_tmp01, _tmp02);
__m128 _tmp024b = _mm_add_ps(_tmp03, _tmp04);
__m128 _tmp135b = _mm_sub_ps(_tmp03, _tmp04);
__m128 _tmp024c = _mm_add_ps(_tmp05, _tmp06);
__m128 _tmp135c = _mm_sub_ps(_tmp05, _tmp06);
__m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b)));
__m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, _tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a)));
__m128 _out04 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, _tmp024a)));
_mm_store_ps(output0, _out00);
_mm_store_ps(output0 + 4 * 2, _out02);
_mm_store_ps(output0 + 4 * 4, _out04);
__m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a)));
__m128 _out03 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a)));
__m128 _out05 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp07, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c)));
_mm_store_ps(output0 + 4, _out01);
_mm_store_ps(output0 + 4 * 3, _out03);
_mm_store_ps(output0 + 4 * 5, _out05);
output0 += outw * 4;
}
}
}
}
}
static void conv3x3s1_winograd43_transform_input_pack4_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
const int w = bottom_blob.w;
const int h = bottom_blob.h;
const int inch = bottom_blob.c;
const int w_tiles = (w - 2) / 4;
const int h_tiles = (h - 2) / 4;
const int tiles = w_tiles * h_tiles;
// const float itm[6][6] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
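    //
    // A minimal scalar sketch of the 1-D 6-point transform above for one lane
    // (hypothetical reference, not part of the original file):
    //   void winograd43_input_1d(const float r[6], float d[6])
    //   {
    //       d[0] = r[0] * 4.f - r[2] * 5.f + r[4];
    //       d[1] = r[4] + r[3] - (r[1] + r[2]) * 4.f;
    //       d[2] = r[4] - r[3] + (r[1] - r[2]) * 4.f;
    //       d[3] = r[4] - r[2] - (r[1] - r[3]) * 2.f;
    //       d[4] = r[4] - r[2] + (r[1] - r[3]) * 2.f;
    //       d[5] = r[1] * 4.f - r[3] * 5.f + r[5];
    //   }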
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
#ifdef _MSC_VER
__declspec(align(16))
#else
__attribute__((aligned(16)))
#endif
float tmp[6][6][4];
__m128 _vm5 = _mm_set1_ps(-5.f);
__m128 _vm4 = _mm_set1_ps(-4.f);
__m128 _v4 = _mm_set1_ps(4.f);
__m128 _vm2 = _mm_set1_ps(-2.f);
__m128 _v2 = _mm_set1_ps(2.f);
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* r0 = img0.row(i * 4) + (j * 4) * 4;
for (int m = 0; m < 6; m++)
{
__m128 _r00 = _mm_load_ps(r0);
__m128 _r01 = _mm_load_ps(r0 + 4);
__m128 _r02 = _mm_load_ps(r0 + 4 * 2);
__m128 _r03 = _mm_load_ps(r0 + 4 * 3);
__m128 _r04 = _mm_load_ps(r0 + 4 * 4);
__m128 _r05 = _mm_load_ps(r0 + 4 * 5);
__m128 _tmp0m = _mm_comp_fmadd_ps(_vm5, _r02, _mm_comp_fmadd_ps(_v4, _r00, _r04));
__m128 _tmp1m = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_r01, _r02), _mm_add_ps(_r04, _r03));
__m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_r01, _r02), _mm_sub_ps(_r04, _r03));
__m128 _tmp3m = _mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02));
__m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02));
__m128 _tmp5m = _mm_comp_fmadd_ps(_vm5, _r03, _mm_comp_fmadd_ps(_v4, _r01, _r05));
_mm_store_ps(tmp[0][m], _tmp0m);
_mm_store_ps(tmp[1][m], _tmp1m);
_mm_store_ps(tmp[2][m], _tmp2m);
_mm_store_ps(tmp[3][m], _tmp3m);
_mm_store_ps(tmp[4][m], _tmp4m);
_mm_store_ps(tmp[5][m], _tmp5m);
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4;
float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;
for (int m = 0; m < 6; m++)
{
__m128 _tmp00 = _mm_load_ps(tmp[m][0]);
__m128 _tmp01 = _mm_load_ps(tmp[m][1]);
__m128 _tmp02 = _mm_load_ps(tmp[m][2]);
__m128 _tmp03 = _mm_load_ps(tmp[m][3]);
__m128 _tmp04 = _mm_load_ps(tmp[m][4]);
__m128 _tmp05 = _mm_load_ps(tmp[m][5]);
__m128 _r0tm0 = _mm_comp_fmadd_ps(_vm5, _tmp02, _mm_comp_fmadd_ps(_v4, _tmp00, _tmp04));
__m128 _r0tm1 = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_tmp01, _tmp02), _mm_add_ps(_tmp04, _tmp03));
__m128 _r0tm2 = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_tmp01, _tmp02), _mm_sub_ps(_tmp04, _tmp03));
__m128 _r0tm3 = _mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02));
__m128 _r0tm4 = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02));
__m128 _r0tm5 = _mm_comp_fmadd_ps(_vm5, _tmp03, _mm_comp_fmadd_ps(_v4, _tmp01, _tmp05));
_mm_store_ps(r0_tm_0, _r0tm0);
_mm_store_ps(r0_tm_1, _r0tm1);
_mm_store_ps(r0_tm_2, _r0tm2);
_mm_store_ps(r0_tm_3, _r0tm3);
_mm_store_ps(r0_tm_4, _r0tm4);
_mm_store_ps(r0_tm_5, _r0tm5);
r0_tm_0 += tiles * 4 * 6;
r0_tm_1 += tiles * 4 * 6;
r0_tm_2 += tiles * 4 * 6;
r0_tm_3 += tiles * 4 * 6;
r0_tm_4 += tiles * 4 * 6;
r0_tm_5 += tiles * 4 * 6;
}
}
}
}
}
static void conv3x3s1_winograd43_transform_output_pack4_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
const int w_tiles = outw / 4;
const int h_tiles = outh / 4;
const int tiles = w_tiles * h_tiles;
const float* biasptr = bias;
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
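    //
    // A minimal scalar sketch of the 1-D output transform above for one lane
    // (hypothetical reference, not part of the original file):
    //   void winograd43_output_1d(const float r[6], float out[4])
    //   {
    //       float a = r[1] + r[2], sa = r[1] - r[2];
    //       float b = r[3] + r[4], sb = r[3] - r[4];
    //       out[0] = r[0] + a + b;
    //       out[1] = sa + sb * 2.f;
    //       out[2] = a + b * 4.f;
    //       out[3] = r[5] + sa + sb * 8.f;
    //   }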
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob.channel(p);
__m128 _bias0 = biasptr ? _mm_loadu_ps(biasptr + p * 4) : _mm_setzero_ps();
#ifdef _MSC_VER
__declspec(align(16))
#else
__attribute__((aligned(16)))
#endif
float tmp[4][6][4];
__m128 _v2 = _mm_set1_ps(2.f);
__m128 _v4 = _mm_set1_ps(4.f);
__m128 _v8 = _mm_set1_ps(8.f);
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5;
float* output0 = out0.row(i * 4) + (j * 4) * 4;
for (int m = 0; m < 6; m++)
{
__m128 _out0tm0 = _mm_load_ps(output0_tm_0);
__m128 _out0tm1 = _mm_load_ps(output0_tm_1);
__m128 _out0tm2 = _mm_load_ps(output0_tm_2);
__m128 _out0tm3 = _mm_load_ps(output0_tm_3);
__m128 _out0tm4 = _mm_load_ps(output0_tm_4);
__m128 _out0tm5 = _mm_load_ps(output0_tm_5);
__m128 _tmp02a = _mm_add_ps(_out0tm1, _out0tm2);
__m128 _tmp13a = _mm_sub_ps(_out0tm1, _out0tm2);
__m128 _tmp02b = _mm_add_ps(_out0tm3, _out0tm4);
__m128 _tmp13b = _mm_sub_ps(_out0tm3, _out0tm4);
__m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp02a), _tmp02b);
__m128 _tmp1m = _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a);
__m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a);
__m128 _tmp3m = _mm_comp_fmadd_ps(_v8, _tmp13b, _mm_add_ps(_out0tm5, _tmp13a));
_mm_store_ps(tmp[0][m], _tmp0m);
_mm_store_ps(tmp[1][m], _tmp1m);
_mm_store_ps(tmp[2][m], _tmp2m);
_mm_store_ps(tmp[3][m], _tmp3m);
output0_tm_0 += tiles * 4 * 6;
output0_tm_1 += tiles * 4 * 6;
output0_tm_2 += tiles * 4 * 6;
output0_tm_3 += tiles * 4 * 6;
output0_tm_4 += tiles * 4 * 6;
output0_tm_5 += tiles * 4 * 6;
}
for (int m = 0; m < 4; m++)
{
__m128 _tmp00 = _mm_load_ps(tmp[m][0]);
__m128 _tmp01 = _mm_load_ps(tmp[m][1]);
__m128 _tmp02 = _mm_load_ps(tmp[m][2]);
__m128 _tmp03 = _mm_load_ps(tmp[m][3]);
__m128 _tmp04 = _mm_load_ps(tmp[m][4]);
__m128 _tmp05 = _mm_load_ps(tmp[m][5]);
__m128 _tmp02a = _mm_add_ps(_tmp01, _tmp02);
__m128 _tmp13a = _mm_sub_ps(_tmp01, _tmp02);
__m128 _tmp02b = _mm_add_ps(_tmp03, _tmp04);
__m128 _tmp13b = _mm_sub_ps(_tmp03, _tmp04);
__m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp02a), _tmp02b));
__m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a));
__m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a));
__m128 _out03 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, _tmp13b, _mm_add_ps(_tmp05, _tmp13a)));
_mm_store_ps(output0, _out00);
_mm_store_ps(output0 + 4, _out01);
_mm_store_ps(output0 + 4 * 2, _out02);
_mm_store_ps(output0 + 4 * 3, _out03);
output0 += outw * 4;
}
}
}
}
}
static void conv3x3s1_winograd23_transform_input_pack4_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
const int w = bottom_blob.w;
const int h = bottom_blob.h;
const int inch = bottom_blob.c;
const int w_tiles = (w - 2) / 2;
const int h_tiles = (h - 2) / 2;
const int tiles = w_tiles * h_tiles;
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
// 0 = r00 - r02
// 1 = r01 + r02
// 2 = r02 - r01
// 3 = r03 - r01
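    //
    // In matrix form this is the standard Winograd F(2x2, 3x3) input transform
    // d = B^T * r * B with B^T = itm above; the loops below apply it separably,
    // first along one axis into tmp, then along the other into the strided
    // transformed layout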
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
#ifdef _MSC_VER
__declspec(align(16))
#else
__attribute__((aligned(16)))
#endif
float tmp[4][4][4];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* r0 = img0.row(i * 2) + (j * 2) * 4;
for (int m = 0; m < 4; m++)
{
__m128 _r00 = _mm_load_ps(r0);
__m128 _r01 = _mm_load_ps(r0 + 4);
__m128 _r02 = _mm_load_ps(r0 + 4 * 2);
__m128 _r03 = _mm_load_ps(r0 + 4 * 3);
__m128 _tmp0m = _mm_sub_ps(_r00, _r02);
__m128 _tmp1m = _mm_add_ps(_r01, _r02);
__m128 _tmp2m = _mm_sub_ps(_r02, _r01);
__m128 _tmp3m = _mm_sub_ps(_r03, _r01);
_mm_store_ps(tmp[0][m], _tmp0m);
_mm_store_ps(tmp[1][m], _tmp1m);
_mm_store_ps(tmp[2][m], _tmp2m);
_mm_store_ps(tmp[3][m], _tmp3m);
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
for (int m = 0; m < 4; m++)
{
__m128 _tmp00 = _mm_load_ps(tmp[m][0]);
__m128 _tmp01 = _mm_load_ps(tmp[m][1]);
__m128 _tmp02 = _mm_load_ps(tmp[m][2]);
__m128 _tmp03 = _mm_load_ps(tmp[m][3]);
__m128 _r0tm0 = _mm_sub_ps(_tmp00, _tmp02);
__m128 _r0tm1 = _mm_add_ps(_tmp01, _tmp02);
__m128 _r0tm2 = _mm_sub_ps(_tmp02, _tmp01);
__m128 _r0tm3 = _mm_sub_ps(_tmp03, _tmp01);
_mm_store_ps(r0_tm_0, _r0tm0);
_mm_store_ps(r0_tm_1, _r0tm1);
_mm_store_ps(r0_tm_2, _r0tm2);
_mm_store_ps(r0_tm_3, _r0tm3);
r0_tm_0 += tiles * 4 * 4;
r0_tm_1 += tiles * 4 * 4;
r0_tm_2 += tiles * 4 * 4;
r0_tm_3 += tiles * 4 * 4;
}
}
}
}
}
static void conv3x3s1_winograd23_transform_output_pack4_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
const int w_tiles = outw / 2;
const int h_tiles = outh / 2;
const int tiles = w_tiles * h_tiles;
const float* biasptr = bias;
// const float otm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r00 + r01 + r02
// 1 = r01 - r02 + r03
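    //
    // In matrix form this is the Winograd F(2x2, 3x3) output transform
    // Y = A^T * m * A with A^T = otm above, applied separably below;
    // the bias is added after the second 1-D pass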
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob.channel(p);
__m128 _bias0 = biasptr ? _mm_loadu_ps(biasptr + p * 4) : _mm_setzero_ps();
#ifdef _MSC_VER
__declspec(align(16))
#else
__attribute__((aligned(16)))
#endif
float tmp[2][4][4];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
float* output0 = out0.row(i * 2) + (j * 2) * 4;
for (int m = 0; m < 4; m++)
{
__m128 _out0tm0 = _mm_load_ps(output0_tm_0);
__m128 _out0tm1 = _mm_load_ps(output0_tm_1);
__m128 _out0tm2 = _mm_load_ps(output0_tm_2);
__m128 _out0tm3 = _mm_load_ps(output0_tm_3);
__m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _out0tm1), _out0tm2);
__m128 _tmp1m = _mm_add_ps(_mm_sub_ps(_out0tm1, _out0tm2), _out0tm3);
_mm_store_ps(tmp[0][m], _tmp0m);
_mm_store_ps(tmp[1][m], _tmp1m);
output0_tm_0 += tiles * 4 * 4;
output0_tm_1 += tiles * 4 * 4;
output0_tm_2 += tiles * 4 * 4;
output0_tm_3 += tiles * 4 * 4;
}
for (int m = 0; m < 2; m++)
{
__m128 _tmp00 = _mm_load_ps(tmp[m][0]);
__m128 _tmp01 = _mm_load_ps(tmp[m][1]);
__m128 _tmp02 = _mm_load_ps(tmp[m][2]);
__m128 _tmp03 = _mm_load_ps(tmp[m][3]);
__m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp01), _tmp02));
__m128 _out01 = _mm_add_ps(_bias0, _mm_add_ps(_mm_sub_ps(_tmp01, _tmp02), _tmp03));
_mm_store_ps(output0, _out00);
_mm_store_ps(output0 + 4, _out01);
output0 += outw * 4;
}
}
}
}
}
|
GB_binop__plus_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__plus_uint8
// A.*B function (eWiseMult): GB_AemultB__plus_uint8
// A*D function (colscale): GB_AxD__plus_uint8
// D*A function (rowscale): GB_DxB__plus_uint8
// C+=B function (dense accum): GB_Cdense_accumB__plus_uint8
// C+=b function (dense accum): GB_Cdense_accumb__plus_uint8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__plus_uint8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__plus_uint8
// C=scalar+B GB_bind1st__plus_uint8
// C=scalar+B' GB_bind1st_tran__plus_uint8
// C=A+scalar GB_bind2nd__plus_uint8
// C=A'+scalar GB_bind2nd_tran__plus_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij + bij)
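// For reference, a minimal sketch of how these macros compose inside the
// included templates (the indices pA, pB, p are illustrative; the real loops
// live in files such as GB_emult_template.c):
//      GB_GETA (aij, Ax, pA) ;             // uint8_t aij = Ax [pA]
//      GB_GETB (bij, Bx, pB) ;             // uint8_t bij = Bx [pB]
//      GB_BINOP (GB_CX (p), aij, bij) ;    // Cx [p] = (aij + bij)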
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x + y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_UINT8 || GxB_NO_PLUS_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__plus_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__plus_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__plus_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__plus_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__plus_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__plus_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__plus_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__plus_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__plus_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t bij = Bx [p] ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__plus_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x + aij) ; \
}
GrB_Info GB_bind1st_tran__plus_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij + y) ; \
}
GrB_Info GB_bind2nd_tran__plus_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_3x3_pack4to1_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd64_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
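    // e.g. a 20x20 output rounds up to 24x24, so the input is padded to 26x26:
    // the input is consumed as 8x8 tiles with stride 6 (neighbouring tiles
    // overlap by 2 pixels), each producing one 6x6 output tile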
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 6;
int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 64, inch, 16u, elempack, opt.workspace_allocator);
conv3x3s1_winograd64_transform_input_pack4_bf16s_neon(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
#if __aarch64__
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + tiles % 12 % 4, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator);
#else
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
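            // tiles are repacked in groups of 12/8/4/1 (8/4/1 without __aarch64__)
            // so the gemm kernels below read contiguous rows; the index expression
            // i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4 maps a tile
            // index to the row that holds its group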
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
float* tm2p = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0] \n"
"sub %0, %0, #128 \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v19.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19");
r0 += bottom_blob_tm.cstep * 4;
}
}
#endif
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8);
#else
float* tm2p = tm2.row(i / 8);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n"
"sub %0, %0, #64 \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.f32 {d0-d3}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d4-d7}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d16-d19}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d20-d23}, [%0 :128] \n"
"sub %0, %0, #96 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vswp d17, d20 \n"
"vswp d19, d22 \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
"vst1.f32 {d4-d5}, [%1 :128]! \n"
"vst1.f32 {d20-d21}, [%1 :128]! \n"
"vst1.f32 {d2-d3}, [%1 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"vst1.f32 {d6-d7}, [%1 :128]! \n"
"vst1.f32 {d22-d23}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
#endif
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.f32 {d0-d3}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d4-d7}, [%0 :128] \n"
"sub %0, %0, #32 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
"vst1.f32 {d4-d5}, [%1 :128]! \n"
"vst1.f32 {d2-d3}, [%1 :128]! \n"
"vst1.f32 {d6-d7}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i < tiles; i++)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + i % 4);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
#if __aarch64__
nn_outch = outch >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
float* output4_tm = top_blob_tm.channel(p + 4);
float* output5_tm = top_blob_tm.channel(p + 5);
float* output6_tm = top_blob_tm.channel(p + 6);
float* output7_tm = top_blob_tm.channel(p + 7);
const Mat kernel01_tm = kernel_tm.channel(p / 8);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
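                // the asm below keeps a 12-tile x 8-output-channel accumulator tile
                // in v8..v31 (96 floats); each loop iteration consumes one pack-4
                // input channel: 12x4 transformed inputs from r0 and 8x4 kernel
                // values from kptr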
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v23.4s, v0.4s, v5.s[1] \n"
"fmla v26.4s, v0.4s, v5.s[2] \n"
"fmla v29.4s, v0.4s, v5.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v21.4s, v1.4s, v5.s[0] \n"
"fmla v24.4s, v1.4s, v5.s[1] \n"
"fmla v27.4s, v1.4s, v5.s[2] \n"
"fmla v30.4s, v1.4s, v5.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"fmla v22.4s, v2.4s, v5.s[0] \n"
"fmla v25.4s, v2.4s, v5.s[1] \n"
"fmla v28.4s, v2.4s, v5.s[2] \n"
"fmla v31.4s, v2.4s, v5.s[3] \n"
"fmla v8.4s, v3.4s, v6.s[0] \n"
"fmla v11.4s, v3.4s, v6.s[1] \n"
"fmla v14.4s, v3.4s, v6.s[2] \n"
"fmla v17.4s, v3.4s, v6.s[3] \n"
"fmla v20.4s, v3.4s, v7.s[0] \n"
"fmla v23.4s, v3.4s, v7.s[1] \n"
"fmla v26.4s, v3.4s, v7.s[2] \n"
"fmla v29.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v9.4s, v0.4s, v6.s[0] \n"
"fmla v12.4s, v0.4s, v6.s[1] \n"
"fmla v15.4s, v0.4s, v6.s[2] \n"
"fmla v18.4s, v0.4s, v6.s[3] \n"
"fmla v21.4s, v0.4s, v7.s[0] \n"
"fmla v24.4s, v0.4s, v7.s[1] \n"
"fmla v27.4s, v0.4s, v7.s[2] \n"
"fmla v30.4s, v0.4s, v7.s[3] \n"
"fmla v10.4s, v1.4s, v6.s[0] \n"
"fmla v13.4s, v1.4s, v6.s[1] \n"
"fmla v16.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v22.4s, v1.4s, v7.s[0] \n"
"fmla v25.4s, v1.4s, v7.s[1] \n"
"fmla v28.4s, v1.4s, v7.s[2] \n"
"fmla v31.4s, v1.4s, v7.s[3] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"fmla v8.4s, v2.4s, v4.s[0] \n"
"fmla v11.4s, v2.4s, v4.s[1] \n"
"fmla v14.4s, v2.4s, v4.s[2] \n"
"fmla v17.4s, v2.4s, v4.s[3] \n"
"fmla v20.4s, v2.4s, v5.s[0] \n"
"fmla v23.4s, v2.4s, v5.s[1] \n"
"fmla v26.4s, v2.4s, v5.s[2] \n"
"fmla v29.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v4.s[0] \n"
"fmla v12.4s, v3.4s, v4.s[1] \n"
"fmla v15.4s, v3.4s, v4.s[2] \n"
"fmla v18.4s, v3.4s, v4.s[3] \n"
"fmla v21.4s, v3.4s, v5.s[0] \n"
"fmla v24.4s, v3.4s, v5.s[1] \n"
"fmla v27.4s, v3.4s, v5.s[2] \n"
"fmla v30.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v10.4s, v0.4s, v4.s[0] \n"
"fmla v13.4s, v0.4s, v4.s[1] \n"
"fmla v16.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v22.4s, v0.4s, v5.s[0] \n"
"fmla v25.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v31.4s, v0.4s, v5.s[3] \n"
"fmla v8.4s, v1.4s, v6.s[0] \n"
"fmla v11.4s, v1.4s, v6.s[1] \n"
"fmla v14.4s, v1.4s, v6.s[2] \n"
"fmla v17.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v23.4s, v1.4s, v7.s[1] \n"
"fmla v26.4s, v1.4s, v7.s[2] \n"
"fmla v29.4s, v1.4s, v7.s[3] \n"
"fmla v9.4s, v2.4s, v6.s[0] \n"
"fmla v12.4s, v2.4s, v6.s[1] \n"
"fmla v15.4s, v2.4s, v6.s[2] \n"
"fmla v18.4s, v2.4s, v6.s[3] \n"
"fmla v21.4s, v2.4s, v7.s[0] \n"
"fmla v24.4s, v2.4s, v7.s[1] \n"
"fmla v27.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"fmla v10.4s, v3.4s, v6.s[0] \n"
"fmla v13.4s, v3.4s, v6.s[1] \n"
"fmla v16.4s, v3.4s, v6.s[2] \n"
"fmla v19.4s, v3.4s, v6.s[3] \n"
"fmla v22.4s, v3.4s, v7.s[0] \n"
"fmla v25.4s, v3.4s, v7.s[1] \n"
"fmla v28.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n"
"st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n"
"st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n"
"st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n"
"st1 {v20.4s, v21.4s, v22.4s}, [%5], #48 \n"
"st1 {v23.4s, v24.4s, v25.4s}, [%6], #48 \n"
"st1 {v26.4s, v27.4s, v28.4s}, [%7], #48 \n"
"st1 {v29.4s, v30.4s, v31.4s}, [%8], #48 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v18.4s, v0.4s, v4.s[1] \n"
"fmla v20.4s, v0.4s, v4.s[2] \n"
"fmla v22.4s, v0.4s, v4.s[3] \n"
"fmla v24.4s, v0.4s, v5.s[0] \n"
"fmla v26.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v30.4s, v0.4s, v5.s[3] \n"
"fmla v17.4s, v1.4s, v4.s[0] \n"
"fmla v19.4s, v1.4s, v4.s[1] \n"
"fmla v21.4s, v1.4s, v4.s[2] \n"
"fmla v23.4s, v1.4s, v4.s[3] \n"
"fmla v25.4s, v1.4s, v5.s[0] \n"
"fmla v27.4s, v1.4s, v5.s[1] \n"
"fmla v29.4s, v1.4s, v5.s[2] \n"
"fmla v31.4s, v1.4s, v5.s[3] \n"
"fmla v16.4s, v2.4s, v6.s[0] \n"
"fmla v18.4s, v2.4s, v6.s[1] \n"
"fmla v20.4s, v2.4s, v6.s[2] \n"
"fmla v22.4s, v2.4s, v6.s[3] \n"
"fmla v24.4s, v2.4s, v7.s[0] \n"
"fmla v26.4s, v2.4s, v7.s[1] \n"
"fmla v28.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"fmla v17.4s, v3.4s, v6.s[0] \n"
"fmla v19.4s, v3.4s, v6.s[1] \n"
"fmla v21.4s, v3.4s, v6.s[2] \n"
"fmla v23.4s, v3.4s, v6.s[3] \n"
"fmla v25.4s, v3.4s, v7.s[0] \n"
"fmla v27.4s, v3.4s, v7.s[1] \n"
"fmla v29.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n"
"fmla v16.4s, v12.4s, v8.s[0] \n"
"fmla v18.4s, v12.4s, v8.s[1] \n"
"fmla v20.4s, v12.4s, v8.s[2] \n"
"fmla v22.4s, v12.4s, v8.s[3] \n"
"fmla v24.4s, v12.4s, v9.s[0] \n"
"fmla v26.4s, v12.4s, v9.s[1] \n"
"fmla v28.4s, v12.4s, v9.s[2] \n"
"fmla v30.4s, v12.4s, v9.s[3] \n"
"fmla v17.4s, v13.4s, v8.s[0] \n"
"fmla v19.4s, v13.4s, v8.s[1] \n"
"fmla v21.4s, v13.4s, v8.s[2] \n"
"fmla v23.4s, v13.4s, v8.s[3] \n"
"fmla v25.4s, v13.4s, v9.s[0] \n"
"fmla v27.4s, v13.4s, v9.s[1] \n"
"fmla v29.4s, v13.4s, v9.s[2] \n"
"fmla v31.4s, v13.4s, v9.s[3] \n"
"fmla v16.4s, v14.4s, v10.s[0] \n"
"fmla v18.4s, v14.4s, v10.s[1] \n"
"fmla v20.4s, v14.4s, v10.s[2] \n"
"fmla v22.4s, v14.4s, v10.s[3] \n"
"fmla v24.4s, v14.4s, v11.s[0] \n"
"fmla v26.4s, v14.4s, v11.s[1] \n"
"fmla v28.4s, v14.4s, v11.s[2] \n"
"fmla v30.4s, v14.4s, v11.s[3] \n"
"fmla v17.4s, v15.4s, v10.s[0] \n"
"fmla v19.4s, v15.4s, v10.s[1] \n"
"fmla v21.4s, v15.4s, v10.s[2] \n"
"fmla v23.4s, v15.4s, v10.s[3] \n"
"fmla v25.4s, v15.4s, v11.s[0] \n"
"fmla v27.4s, v15.4s, v11.s[1] \n"
"fmla v29.4s, v15.4s, v11.s[2] \n"
"fmla v31.4s, v15.4s, v11.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
"st1 {v20.4s, v21.4s}, [%3], #32 \n"
"st1 {v22.4s, v23.4s}, [%4], #32 \n"
"st1 {v24.4s, v25.4s}, [%5], #32 \n"
"st1 {v26.4s, v27.4s}, [%6], #32 \n"
"st1 {v28.4s, v29.4s}, [%7], #32 \n"
"st1 {v30.4s, v31.4s}, [%8], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v17.4s, v0.4s, v4.s[1] \n"
"fmla v18.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v21.4s, v0.4s, v5.s[1] \n"
"fmla v22.4s, v0.4s, v5.s[2] \n"
"fmla v23.4s, v0.4s, v5.s[3] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n"
"fmla v16.4s, v1.4s, v6.s[0] \n"
"fmla v17.4s, v1.4s, v6.s[1] \n"
"fmla v18.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v21.4s, v1.4s, v7.s[1] \n"
"fmla v22.4s, v1.4s, v7.s[2] \n"
"fmla v23.4s, v1.4s, v7.s[3] \n"
"fmla v16.4s, v2.4s, v8.s[0] \n"
"fmla v17.4s, v2.4s, v8.s[1] \n"
"fmla v18.4s, v2.4s, v8.s[2] \n"
"fmla v19.4s, v2.4s, v8.s[3] \n"
"fmla v20.4s, v2.4s, v9.s[0] \n"
"fmla v21.4s, v2.4s, v9.s[1] \n"
"fmla v22.4s, v2.4s, v9.s[2] \n"
"fmla v23.4s, v2.4s, v9.s[3] \n"
"fmla v16.4s, v3.4s, v10.s[0] \n"
"fmla v17.4s, v3.4s, v10.s[1] \n"
"fmla v18.4s, v3.4s, v10.s[2] \n"
"fmla v19.4s, v3.4s, v10.s[3] \n"
"fmla v20.4s, v3.4s, v11.s[0] \n"
"fmla v21.4s, v3.4s, v11.s[1] \n"
"fmla v22.4s, v3.4s, v11.s[2] \n"
"fmla v23.4s, v3.4s, v11.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
"st1 {v18.4s}, [%3], #16 \n"
"st1 {v19.4s}, [%4], #16 \n"
"st1 {v20.4s}, [%5], #16 \n"
"st1 {v21.4s}, [%6], #16 \n"
"st1 {v22.4s}, [%7], #16 \n"
"st1 {v23.4s}, [%8], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.4s}, [%9], #16 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v5.4s, v0.s[0] \n"
"fmla v18.4s, v6.4s, v0.s[1] \n"
"fmla v19.4s, v7.4s, v0.s[1] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n"
"fmla v16.4s, v8.4s, v0.s[2] \n"
"fmla v17.4s, v9.4s, v0.s[2] \n"
"fmla v18.4s, v10.4s, v0.s[3] \n"
"fmla v19.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"st1 {v16.s}[0], [%1], #4 \n"
"st1 {v16.s}[1], [%2], #4 \n"
"st1 {v16.s}[2], [%3], #4 \n"
"st1 {v16.s}[3], [%4], #4 \n"
"st1 {v17.s}[0], [%5], #4 \n"
"st1 {v17.s}[1], [%6], #4 \n"
"st1 {v17.s}[2], [%7], #4 \n"
"st1 {v17.s}[3], [%8], #4 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
}
}
}
remain_outch_start += nn_outch << 3;
nn_outch = (outch - remain_outch_start) >> 2;
#else // __aarch64__
nn_outch = outch >> 2;
#endif // __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
#if __aarch64__
const Mat kernel01_tm = kernel_tm.channel(p / 8 + (p % 8) / 4);
#else
const Mat kernel01_tm = kernel_tm.channel(p / 4);
#endif
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%5], #64 \n"
"fmla v8.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v14.4s, v3.4s, v5.s[2] \n"
"fmla v17.4s, v3.4s, v5.s[3] \n"
"fmla v9.4s, v20.4s, v5.s[0] \n"
"fmla v12.4s, v20.4s, v5.s[1] \n"
"fmla v15.4s, v20.4s, v5.s[2] \n"
"fmla v18.4s, v20.4s, v5.s[3] \n"
"fmla v10.4s, v21.4s, v5.s[0] \n"
"fmla v13.4s, v21.4s, v5.s[1] \n"
"fmla v16.4s, v21.4s, v5.s[2] \n"
"fmla v19.4s, v21.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%5], #64 \n"
"fmla v8.4s, v22.4s, v6.s[0] \n"
"fmla v11.4s, v22.4s, v6.s[1] \n"
"fmla v14.4s, v22.4s, v6.s[2] \n"
"fmla v17.4s, v22.4s, v6.s[3] \n"
"fmla v9.4s, v23.4s, v6.s[0] \n"
"fmla v12.4s, v23.4s, v6.s[1] \n"
"fmla v15.4s, v23.4s, v6.s[2] \n"
"fmla v18.4s, v23.4s, v6.s[3] \n"
"fmla v10.4s, v24.4s, v6.s[0] \n"
"fmla v13.4s, v24.4s, v6.s[1] \n"
"fmla v16.4s, v24.4s, v6.s[2] \n"
"fmla v19.4s, v24.4s, v6.s[3] \n"
"fmla v8.4s, v25.4s, v7.s[0] \n"
"fmla v11.4s, v25.4s, v7.s[1] \n"
"fmla v14.4s, v25.4s, v7.s[2] \n"
"fmla v17.4s, v25.4s, v7.s[3] \n"
"fmla v9.4s, v26.4s, v7.s[0] \n"
"fmla v12.4s, v26.4s, v7.s[1] \n"
"fmla v15.4s, v26.4s, v7.s[2] \n"
"fmla v18.4s, v26.4s, v7.s[3] \n"
"fmla v10.4s, v27.4s, v7.s[0] \n"
"fmla v13.4s, v27.4s, v7.s[1] \n"
"fmla v16.4s, v27.4s, v7.s[2] \n"
"fmla v19.4s, v27.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n"
"st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n"
"st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n"
"st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif // __aarch64__
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
#else
const float* r0 = bb2.row(i / 8);
#endif
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v10.4s, v0.4s, v4.s[1] \n"
"fmla v12.4s, v0.4s, v4.s[2] \n"
"fmla v14.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v11.4s, v1.4s, v4.s[1] \n"
"fmla v13.4s, v1.4s, v4.s[2] \n"
"fmla v15.4s, v1.4s, v4.s[3] \n"
"fmla v8.4s, v2.4s, v5.s[0] \n"
"fmla v10.4s, v2.4s, v5.s[1] \n"
"fmla v12.4s, v2.4s, v5.s[2] \n"
"fmla v14.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v13.4s, v3.4s, v5.s[2] \n"
"fmla v15.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%5], #64 \n"
"fmla v8.4s, v16.4s, v6.s[0] \n"
"fmla v10.4s, v16.4s, v6.s[1] \n"
"fmla v12.4s, v16.4s, v6.s[2] \n"
"fmla v14.4s, v16.4s, v6.s[3] \n"
"fmla v9.4s, v17.4s, v6.s[0] \n"
"fmla v11.4s, v17.4s, v6.s[1] \n"
"fmla v13.4s, v17.4s, v6.s[2] \n"
"fmla v15.4s, v17.4s, v6.s[3] \n"
"fmla v8.4s, v18.4s, v7.s[0] \n"
"fmla v10.4s, v18.4s, v7.s[1] \n"
"fmla v12.4s, v18.4s, v7.s[2] \n"
"fmla v14.4s, v18.4s, v7.s[3] \n"
"fmla v9.4s, v19.4s, v7.s[0] \n"
"fmla v11.4s, v19.4s, v7.s[1] \n"
"fmla v13.4s, v19.4s, v7.s[2] \n"
"fmla v15.4s, v19.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"st1 {v10.4s, v11.4s}, [%2], #32 \n"
"st1 {v12.4s, v13.4s}, [%3], #32 \n"
"st1 {v14.4s, v15.4s}, [%4], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"veor q12, q12 \n"
"veor q13, q13 \n"
"veor q14, q14 \n"
"veor q15, q15 \n"
"0: \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
"pld [%6, #512] \n"
"vldm %6!, {d8-d15} \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q10, q0, d8[1] \n"
"vmla.f32 q12, q0, d9[0] \n"
"vmla.f32 q14, q0, d9[1] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q11, q1, d8[1] \n"
"vmla.f32 q13, q1, d9[0] \n"
"vmla.f32 q15, q1, d9[1] \n"
"vmla.f32 q8, q2, d10[0] \n"
"vmla.f32 q10, q2, d10[1] \n"
"vmla.f32 q12, q2, d11[0] \n"
"vmla.f32 q14, q2, d11[1] \n"
"vmla.f32 q9, q3, d10[0] \n"
"vmla.f32 q11, q3, d10[1] \n"
"vmla.f32 q13, q3, d11[0] \n"
"vmla.f32 q15, q3, d11[1] \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
"vmla.f32 q8, q0, d12[0] \n"
"vmla.f32 q10, q0, d12[1] \n"
"vmla.f32 q12, q0, d13[0] \n"
"vmla.f32 q14, q0, d13[1] \n"
"vmla.f32 q9, q1, d12[0] \n"
"vmla.f32 q11, q1, d12[1] \n"
"vmla.f32 q13, q1, d13[0] \n"
"vmla.f32 q15, q1, d13[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d14[0] \n"
"vmla.f32 q10, q2, d14[1] \n"
"vmla.f32 q12, q2, d15[0] \n"
"vmla.f32 q14, q2, d15[1] \n"
"vmla.f32 q9, q3, d14[0] \n"
"vmla.f32 q11, q3, d14[1] \n"
"vmla.f32 q13, q3, d15[0] \n"
"vmla.f32 q15, q3, d15[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d19}, [%1]! \n"
"vst1.f32 {d20-d23}, [%2]! \n"
"vst1.f32 {d24-d27}, [%3]! \n"
"vst1.f32 {d28-d31}, [%4]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#endif
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v0.4s, v4.s[1] \n"
"fmla v10.4s, v0.4s, v4.s[2] \n"
"fmla v11.4s, v0.4s, v4.s[3] \n"
"fmla v8.4s, v1.4s, v5.s[0] \n"
"fmla v9.4s, v1.4s, v5.s[1] \n"
"fmla v10.4s, v1.4s, v5.s[2] \n"
"fmla v11.4s, v1.4s, v5.s[3] \n"
"fmla v8.4s, v2.4s, v6.s[0] \n"
"fmla v9.4s, v2.4s, v6.s[1] \n"
"fmla v10.4s, v2.4s, v6.s[2] \n"
"fmla v11.4s, v2.4s, v6.s[3] \n"
"fmla v8.4s, v3.4s, v7.s[0] \n"
"fmla v9.4s, v3.4s, v7.s[1] \n"
"fmla v10.4s, v3.4s, v7.s[2] \n"
"fmla v11.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s}, [%1], #16 \n"
"st1 {v9.4s}, [%2], #16 \n"
"st1 {v10.4s}, [%3], #16 \n"
"st1 {v11.4s}, [%4], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
"pld [%6, #512] \n"
"vldm %6!, {d8-d15} \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q0, d8[1] \n"
"vmla.f32 q10, q0, d9[0] \n"
"vmla.f32 q11, q0, d9[1] \n"
"vmla.f32 q8, q1, d10[0] \n"
"vmla.f32 q9, q1, d10[1] \n"
"vmla.f32 q10, q1, d11[0] \n"
"vmla.f32 q11, q1, d11[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d12[0] \n"
"vmla.f32 q9, q2, d12[1] \n"
"vmla.f32 q10, q2, d13[0] \n"
"vmla.f32 q11, q2, d13[1] \n"
"vmla.f32 q8, q3, d14[0] \n"
"vmla.f32 q9, q3, d14[1] \n"
"vmla.f32 q10, q3, d15[0] \n"
"vmla.f32 q11, q3, d15[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d17}, [%1]! \n"
"vst1.f32 {d18-d19}, [%2]! \n"
"vst1.f32 {d20-d21}, [%3]! \n"
"vst1.f32 {d22-d23}, [%4]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
for (; i < tiles; i++)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
#endif
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[1] \n"
"fmla v10.4s, v6.4s, v0.s[2] \n"
"fmla v11.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"st1 {v8.s}[0], [%1], #4 \n"
"st1 {v8.s}[1], [%2], #4 \n"
"st1 {v8.s}[2], [%3], #4 \n"
"st1 {v8.s}[3], [%4], #4 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5]! \n"
"pld [%6, #512] \n"
"vldm %6!, {d8-d15} \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q7, d1[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vst1.f32 {d16[0]}, [%1]! \n"
"vst1.f32 {d16[1]}, [%2]! \n"
"vst1.f32 {d17[0]}, [%3]! \n"
"vst1.f32 {d17[1]}, [%4]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
}
}
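    // The blocks above consumed nn_outch groups of four output channels;
    // advance past them (nn_outch << 2 == nn_outch * 4) before the
    // one-channel-at-a-time tail below.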
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
#if __aarch64__
const Mat kernel0_tm = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4);
#endif
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* kptr = kernel0_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3], #16 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n"
"fmla v5.4s, v3.4s, v4.s[1] \n"
"fmla v6.4s, v12.4s, v4.s[1] \n"
"fmla v7.4s, v13.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n"
"fmla v8.4s, v14.4s, v4.s[2] \n"
"fmla v9.4s, v15.4s, v4.s[2] \n"
"fmla v10.4s, v16.4s, v4.s[2] \n"
"fmla v5.4s, v17.4s, v4.s[3] \n"
"fmla v6.4s, v18.4s, v4.s[3] \n"
"fmla v7.4s, v19.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v5.4s \n"
"fadd v9.4s, v9.4s, v6.4s \n"
"fadd v10.4s, v10.4s, v7.4s \n"
"st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
#endif
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
#else
const float* r0 = bb2.row(i / 8);
#endif
const float* kptr = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3], #16 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[1] \n"
"fmla v11.4s, v3.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n"
"fmla v8.4s, v12.4s, v4.s[2] \n"
"fmla v9.4s, v13.4s, v4.s[2] \n"
"fmla v10.4s, v14.4s, v4.s[3] \n"
"fmla v11.4s, v15.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v9.4s, v9.4s, v11.4s \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #128] \n"
"vld1.f32 {d8-d9}, [%3]! \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q10, q2, d8[1] \n"
"vmla.f32 q11, q3, d8[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d24-d31} \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q12, d9[0] \n"
"vmla.f32 q9, q13, d9[0] \n"
"vmla.f32 q10, q14, d9[1] \n"
"vmla.f32 q11, q15, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q9, q9, q11 \n"
"vst1.f32 {d16-d19}, [%1]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#endif
const float* kptr = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3], #16 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"st1 {v8.4s}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #128] \n"
"vld1.f32 {d8-d9}, [%3]! \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vst1.f32 {d16-d17}, [%1]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
for (; i < tiles; i++)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
#endif
const float* kptr = kernel0_tm.row(r);
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (int q = 0; q < inch; q++)
{
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _k0 = vld1q_f32(kptr);
_sum0 = vmlaq_f32(_sum0, _r0, _k0);
kptr += 4;
r0 += 4;
}
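                // Each lane of _sum0 holds a partial dot product over inch;
                // reduce the four lanes to a single float (vaddvq_f32 on
                // aarch64, two pairwise adds on armv7).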
#if __aarch64__
float sum0 = vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss2 = vpadd_f32(_ss, _ss);
float sum0 = vget_lane_f32(_ss2, 0);
#endif
output0_tm[0] = sum0;
output0_tm++;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
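        // Descriptive note (assumption from the bf16s transform below):
        // elemsize 2u / elempack 1 allocates the bordered output as 2-byte
        // bf16 values.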
top_blob_bordered.create(outw, outh, outch, 2u, 1, opt.workspace_allocator);
}
{
conv3x3s1_winograd64_transform_output_bf16s_neon(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
mkl_util.h | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#ifdef INTEL_MKL
#include <string>
#include <vector>
#include <unordered_map>
#include <utility>
#ifdef INTEL_MKL_ML
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#include "mkl_service.h"
#include "mkl_trans.h"
#endif
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#ifndef INTEL_MKL_ML
#include "mkldnn.hpp"
#include "tensorflow/core/lib/core/stringpiece.h"
using mkldnn::engine;
using mkldnn::memory;
using mkldnn::padding_kind;
using mkldnn::primitive;
using mkldnn::reorder;
#endif
#ifdef _WIN32
typedef unsigned int uint;
#endif
namespace tensorflow {
// This file contains a number of utility classes and functions used by
// MKL-enabled kernels.
// This class encapsulates all the meta data that is associated with an MKL
// tensor. A tensor is an MKL tensor if it was created as the result of an
// MKL operation, and did not go through a conversion to a standard
// Tensorflow tensor.
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims;
typedef enum {
Dim_N = 0,
Dim_C = 1,
Dim_H = 2,
Dim_W = 3,
Dim_O = 0,
Dim_I = 1
} MklDnnDims;
#ifdef INTEL_MKL_ML
class MklShape {
public:
MklShape() {}
TF_DISALLOW_COPY_AND_ASSIGN(MklShape); // Cannot copy
~MklShape() {
if (sizes_) delete[] sizes_;
if (strides_) delete[] strides_;
if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS);
if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS);
if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_;
}
const bool IsMklTensor() const { return isMklTensor_; }
void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; }
void SetDimensions(const size_t dimension) { dimension_ = dimension; }
void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; }
void SetMklLayout(const void* primitive, size_t resourceType) {
CHECK_EQ(
dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive,
(dnnResourceType_t)resourceType),
E_SUCCESS);
}
void SetTfLayout(const size_t dimension, const size_t* sizes,
const size_t* strides) {
dimension_ = dimension;
if (dimension > 0) { // MKL doesn't support zero-dimension tensors
sizes_ = new size_t[dimension];
strides_ = new size_t[dimension];
for (int ii = 0; ii < dimension; ii++) {
sizes_[ii] = sizes[ii];
strides_[ii] = strides[ii];
}
CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides),
E_SUCCESS);
}
}
// Default case - MKL dim ordering is opposite of TF dim ordering
// MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim
// TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim
// For layers that rely on data_format semantics (conv, pooling etc.)
// or operate only on certain dimensions (relu, concat, split etc.),
// Mkl APIs might require us to reorder these dimensions. In such cases,
// kernels should explicitly set this map
void SetTfDimOrder(const size_t dimension) {
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
for (size_t ii = 0; ii < dimension; ii++) {
tf_to_mkl_dim_map_[ii] = dimension - (ii + 1);
}
}
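  // Illustrative example (not part of the original source): for a 4-D tensor
  // the default order above yields tf_to_mkl_dim_map_ = {3, 2, 1, 0}, i.e.
  // TF's outermost dimension 0 maps to MKL's dimension 3.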
void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) {
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
for (size_t ii = 0; ii < dimension; ii++) {
tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii];
}
}
void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
CHECK_EQ(dimension, 4);
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N;
}
const dnnLayout_t GetMklLayout() const { return mklLayout_; }
const dnnLayout_t GetTfLayout() const { return tfLayout_; }
const dnnLayout_t GetCurLayout() const {
return isMklTensor_ ? mklLayout_ : tfLayout_;
}
size_t GetDimension() const { return dimension_; }
const size_t* GetSizes() const { return sizes_; }
int64 dim_size(int index) const { return sizes_[index]; }
int64 tf_dim_size(int index) const {
return sizes_[tf_to_mkl_dim_map_[index]];
}
const size_t* GetStrides() const { return strides_; }
const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; }
size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Channel dimension.
bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Batch dimension.
bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Width dimension.
bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Height dimension.
bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; }
// Check if the TF-Mkl dimension ordering map specifies if the input
// tensor is in NCHW format.
bool IsTensorInNCHWFormat() const {
TensorFormat data_format = FORMAT_NCHW;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
// Check if the TF-Mkl dimension ordering map specifies if the input
// tensor is in NHWC format.
bool IsTensorInNHWCFormat() const {
TensorFormat data_format = FORMAT_NHWC;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
void GetConvertedFlatData(dnnLayout_t targetLayout, void* input,
void* output) const {
dnnLayout_t curLayout;
if (isMklTensor_)
curLayout = mklLayout_;
else
curLayout = tfLayout_;
dnnPrimitive_t convert;
CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout),
E_SUCCESS);
CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS);
CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS);
}
// The following methods are used for serializing and de-serializing the
// contents of the MklShape object.
// The data is serialized in this order:
// isMklTensor_
// dimension_
// sizes_
// strides_
// mklLayout_
// tfLayout_
// tf_to_mkl_dim_map_
#define SIZE_OF_MKL_DNN_BUF \
(dnnLayoutSerializationBufferSize_F32()) // Size of buffer needed to
// serialize dnn_layout pointer
// Size of buffer to hold the serialized object; the size is computed as
// follows: sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) +
// sizeof(strides_) + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer) +
// sizeof(tf_to_mkl_dim_map_)
#define SIZE_OF_MKL_SERIAL_DATA(dims) \
(2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF)
// First we need to define some macros for offsets into the serial buffer
// where different elements of MklShape are written/read from.
#define IS_MKL_TENSOR_OFFSET 0
// Location from start of buffer where isMklTensor_ is serialized
#define DIMS_OFFSET \
(IS_MKL_TENSOR_OFFSET + sizeof(size_t)) // Location of dimension_
// Location of sizes. Note dim is not used here, left here
// to make macros consistent.
#define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t))
#define STRIDES_OFFSET(dims) \
(SIZES_OFFSET(dims) + dims * sizeof(size_t)) // Location of strides
#define MKL_LAYOUT_OFFSET(dims) \
(STRIDES_OFFSET(dims) + dims * sizeof(size_t)) // Location of mklLayout_
#define TF_LAYOUT_OFFSET(dims) \
(MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF) // Location of tfLayout_
// Location of tf_to_mkl_dim_map_
#define TF_TO_MKL_DIM_MAP_OFFSET(dims) \
(TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)
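// Worked example (illustrative, assuming 8-byte size_t): for dims == 4 the
// serialized layout is
//   offset 0  : isMklTensor_          (8 bytes)
//   offset 8  : dimension_            (8 bytes)
//   offset 16 : sizes_[4]             (32 bytes)
//   offset 48 : strides_[4]           (32 bytes)
//   offset 80 : mklLayout_ buffer     (SIZE_OF_MKL_DNN_BUF bytes)
//   then      : tfLayout_ buffer      (SIZE_OF_MKL_DNN_BUF bytes)
//   then      : tf_to_mkl_dim_map_[4] (32 bytes)
// giving SIZE_OF_MKL_SERIAL_DATA(4) == 112 + 2 * SIZE_OF_MKL_DNN_BUF bytes.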
// TODO(agramesh1) make sure to create a const to share with rewrite pass
// for min size of MKL metadata tensor.
void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize";
// Make sure buffer holds at least isMklTensor_
isMklTensor_ =
*reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;
if (isMklTensor_) { // If it is an MKL Tensor then read the rest
dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
<< "Bufsize too small in DeSerialize";
sizes_ = new size_t[dimension_];
strides_ = new size_t[dimension_];
tf_to_mkl_dim_map_ = new size_t[dimension_];
for (int i = 0; i < dimension_; i++) {
sizes_[i] =
reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
strides_[i] = reinterpret_cast<const size_t*>(
buf + STRIDES_OFFSET(dimension_))[i];
tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
}
CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
buf + MKL_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
buf + TF_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
}
}
void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
<< "Bufsize too small to Serialize";
*reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) =
isMklTensor_ ? 1 : 0;
if (isMklTensor_) {
*(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
for (int i = 0; i < dimension_; i++) {
reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
sizes_[i];
reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
strides_[i];
reinterpret_cast<size_t*>(buf +
TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
tf_to_mkl_dim_map_[i];
}
CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
buf + MKL_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
CHECK_EQ(
dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
}
}
private:
bool isMklTensor_ =
false; // Flag to indicate if the tensor is an MKL tensor or not
dnnLayout_t mklLayout_ = nullptr; // Pointer to the MKL layout
dnnLayout_t tfLayout_ = nullptr; // Pointer to layout of corresponding
// Tensorflow tensor, used when conversion from MKL to standard tensor
size_t dimension_ = 0;
size_t* sizes_ = nullptr; // Required by MKL for conversions
size_t* strides_ = nullptr; // Required by MKL for conversions
size_t* tf_to_mkl_dim_map_ =
nullptr; // TF dimension corresponding to this MKL dimension
};
#else
// Forward decl
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype);
class MklDnnShape {
private:
typedef struct {
/// Flag to indicate if the tensor is an MKL tensor or not
bool is_mkl_tensor_ = false;
/// Number of dimensions in Tensorflow format
size_t dimension_ = 0;
/// Required by MKLDNN for conversions
mkldnn_dims_t sizes_; // Required by MKL for conversions
memory::format tf_data_format_ = memory::format::format_undef;
memory::data_type T_ = memory::data_type::data_undef;
// MKL layout
mkldnn_memory_desc_t mkl_md_;
/// TF dimension corresponding to this MKL dimension
mkldnn_dims_t map_;
} MklShapeData;
MklShapeData data_;
typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t;
#define INVALID_DIM_SIZE -1
public:
MklDnnShape() {
for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
++i) {
data_.sizes_[i] = -1;
}
for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
data_.map_[i] = -1;
}
}
~MklDnnShape() {}
TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape); // Cannot copy
/// Helper function to compare memory::desc objects for MklDnn.
/// Maybe this should go into MklDnn directly.
inline bool CompareMklDnnLayouts(const memory::desc& md1,
const memory::desc& md2) const {
mkldnn_memory_desc_t mdd1 = md1.data;
mkldnn_memory_desc_t mdd2 = md2.data;
const char* d1 = reinterpret_cast<const char*>(&mdd1);
const char* d2 = reinterpret_cast<const char*>(&mdd2);
size_t md_size = sizeof(mdd1);
for (size_t i = 0; i < md_size; i++) {
if (*d1++ != *d2++) {
return false;
}
}
return true;
}
/// Equality function for MklDnnShape objects
/// @return true if both are equal; false otherwise.
inline bool operator==(const MklDnnShape& input_shape) const {
if (this->IsMklTensor() != input_shape.IsMklTensor()) {
return false;
}
// If input tensors are in Mkl layout, then we check for dimensions and
// sizes.
if (this->IsMklTensor()) {
return this->GetTfShape() == input_shape.GetTfShape() &&
CompareMklDnnLayouts(this->GetMklLayout(),
input_shape.GetMklLayout());
}
return true;
}
/// Equality operator for MklDnnShape and TFShape.
/// Returns: true if TF shapes for both are the same, false otherwise
inline bool operator==(const TensorShape& input_shape) const {
if (!this->IsMklTensor()) {
return false;
}
return this->GetTfShape() == input_shape;
}
inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; }
inline void SetMklTensor(bool is_mkl_tensor) {
data_.is_mkl_tensor_ = is_mkl_tensor;
}
inline void SetDimensions(const size_t dimension) {
data_.dimension_ = dimension;
}
inline size_t GetDimension(char dimension) const {
int index = GetMklDnnTensorDimIndex(dimension);
CHECK(index >= 0 && index < this->GetDimension())
<< "Invalid index from the dimension: " << index << ", " << dimension;
return this->DimSize(index);
}
inline int32 GetMklDnnTensorDimIndex(char dimension) const {
switch (dimension) {
case 'N':
return MklDnnDims::Dim_N;
case 'C':
return MklDnnDims::Dim_C;
case 'H':
return MklDnnDims::Dim_H;
case 'W':
return MklDnnDims::Dim_W;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1; // Avoid compiler warning about missing return value
}
}
inline size_t GetDimension() const { return data_.dimension_; }
inline const int* GetSizes() const {
return reinterpret_cast<const int*>(&data_.sizes_[0]);
}
// Returns an mkldnn::memory::dims object that contains the sizes of this
// MklDnnShape object.
inline memory::dims GetSizesAsMklDnnDims() const {
memory::dims retVal;
if (data_.is_mkl_tensor_) {
size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
for (size_t i = 0; i < dimensions; i++) {
if (data_.sizes_[i] != INVALID_DIM_SIZE)
retVal.push_back(data_.sizes_[i]);
}
} else {
CHECK_EQ(data_.is_mkl_tensor_, true);
}
return retVal;
}
inline int64 DimSize(int index) const {
CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
return data_.sizes_[index];
}
/// Return TensorShape that describes the Tensorflow shape of the tensor
/// represented by this MklShape.
inline TensorShape GetTfShape() const {
CHECK_EQ(data_.is_mkl_tensor_, true);
std::vector<int32> shape(data_.dimension_, -1);
if (data_.tf_data_format_ != memory::format::blocked) {
for (size_t idx = 0; idx < data_.dimension_; ++idx) {
shape[idx] = data_.sizes_[TfDimIdx(idx)];
}
} else {
// If Tensorflow shape is in Blocked format, then we don't have dimension
// map for it. So we just create Tensorflow shape from sizes in the
// specified order.
for (size_t idx = 0; idx < data_.dimension_; ++idx) {
shape[idx] = data_.sizes_[idx];
}
}
TensorShape ts;
bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
CHECK_EQ(ret, true);
return ts;
}
inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
inline const memory::data_type GetElemType() { return data_.T_; }
inline void SetMklLayout(memory::primitive_desc* pd) {
CHECK_NOTNULL(pd);
data_.mkl_md_ = pd->desc().data;
}
inline void SetMklLayout(memory::desc* md) {
CHECK_NOTNULL(md);
data_.mkl_md_ = md->data;
}
inline const memory::desc GetMklLayout() const {
return memory::desc(data_.mkl_md_);
}
inline memory::format GetTfDataFormat() const {
return data_.tf_data_format_;
}
/// We don't create primitive_descriptor for TensorFlow layout now.
/// We use lazy evaluation and create it only when needed. Input format can
/// also be Blocked format.
inline void SetTfLayout(size_t dims, const memory::dims& sizes,
memory::format format) {
CHECK_EQ(dims, sizes.size());
data_.dimension_ = dims;
for (size_t ii = 0; ii < dims; ii++) {
data_.sizes_[ii] = sizes[ii];
}
data_.tf_data_format_ = format;
if (format != memory::format::blocked) {
SetTfDimOrder(dims, format);
}
}
inline const memory::desc GetTfLayout() const {
memory::dims dims;
for (size_t ii = 0; ii < data_.dimension_; ii++) {
dims.push_back(data_.sizes_[ii]);
}
// Create Blocked memory desc if input TF format was set like that.
if (data_.tf_data_format_ == memory::format::blocked) {
auto strides = CalculateTFStrides(dims);
return CreateBlockedMemDescHelper(dims, strides, data_.T_);
} else {
return memory::desc(dims, data_.T_, data_.tf_data_format_);
}
}
inline const memory::desc GetCurLayout() const {
return IsMklTensor() ? GetMklLayout() : GetTfLayout();
}
// nhasabni - I've removed SetTfDimOrder that was setting default order in
// case of MKL-ML. We don't need a case of default dimension order because
// when an operator that does not get data_format attribute gets all inputs
// in Tensorflow format, it will produce output in Tensorflow format.
inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) {
CHECK(dimension == data_.dimension_);
for (size_t ii = 0; ii < dimension; ii++) {
data_.map_[ii] = map[ii];
}
}
inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
// TODO(nhasabni): Why do we restrict this to 4D?
CHECK_EQ(dimension, 4);
CHECK(dimension == data_.dimension_);
data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
}
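  // Illustrative example (not part of the original source): for FORMAT_NHWC
  // this sets data_.map_ = {Dim_N, Dim_H, Dim_W, Dim_C} = {0, 2, 3, 1}, so TF
  // dimension ii corresponds to MKL-DNN logical dimension map_[ii].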
inline void SetTfDimOrder(const size_t dimension, memory::format format) {
TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
SetTfDimOrder(dimension, data_format);
}
inline const mkldnn_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; }
inline size_t TfDimIdx(int index) const { return data_.map_[index]; }
inline int64 TfDimSize(int index) const {
return data_.sizes_[TfDimIdx(index)];
}
/// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
/// corresponds to MKL's Channel dimension.
inline bool IsMklChannelDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_C;
}
/// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
/// corresponds to MKL's Batch dimension.
inline bool IsMklBatchDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_N;
}
/// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
/// corresponds to MKL's Width dimension.
inline bool IsMklWidthDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_W;
}
/// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
/// corresponds to MKL's Height dimension.
inline bool IsMklHeightDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_H;
}
/// Check if the TF-Mkl dimension ordering map specifies if the input
/// tensor is in NCHW format.
inline bool IsTensorInNCHWFormat() const {
TensorFormat data_format = FORMAT_NCHW;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
/// Check if the TF-Mkl dimension ordering map specifies if the input
/// tensor is in NHWC format.
inline bool IsTensorInNHWCFormat() const {
TensorFormat data_format = FORMAT_NHWC;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
/// The following methods are used for serializing and de-serializing the
/// contents of the MklDnnShape object.
/// The data is serialized in this order:
/// is_mkl_tensor_ : dimension_ : sizes_ : map_ : format_ : T_ : mkl_pd_;
/// Size of buffer to hold the serialized object, the size is computed by
/// following above mentioned order
inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }
void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
CHECK(buf_size >= GetSerializeBufferSize())
<< "Buffer size is too small to SerializeMklDnnShape";
*reinterpret_cast<MklShapeData*>(buf) = data_;
}
void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
// Make sure buffer holds at least is_mkl_tensor_.
CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
<< "Buffer size is too small in DeSerializeMklDnnShape";
const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
if (is_mkl_tensor) { // If it is an MKL Tensor then read the rest
CHECK(buf_size >= GetSerializeBufferSize())
<< "Buffer size is too small in DeSerializeMklDnnShape";
data_ = *reinterpret_cast<const MklShapeData*>(buf);
}
}
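  // Minimal round-trip sketch (hypothetical usage, illustrative only):
  //   MklDnnShape shape;  // ... filled in by a kernel
  //   std::vector<unsigned char> buf(shape.GetSerializeBufferSize());
  //   shape.SerializeMklDnnShape(buf.data(), buf.size());
  //   MklDnnShape restored;
  //   restored.DeSerializeMklDnnShape(buf.data(), buf.size());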
};
#endif
// List of MklShape objects. Used in Concat/Split layers.
#ifndef INTEL_MKL_ML
typedef std::vector<MklDnnShape> MklDnnShapeList;
#else
typedef std::vector<MklShape> MklShapeList;
#endif
#ifdef INTEL_MKL_ML
// Check if all tensors specified by MklShapes are MKL tensors.
inline bool AreAllMklTensors(const MklShapeList& shapes) {
for (auto& s : shapes) {
if (!s.IsMklTensor()) {
return false;
}
}
return true;
}
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
const MklShape& mkl_shape) {
Tensor output_tensor;
TensorShape output_shape;
for (size_t j = 0; j < mkl_shape.GetDimension(); j++) {
// Outermost to innermost dimension
output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]);
}
// Allocate output tensor.
context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor);
dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout());
void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data());
void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data());
if (mkl_tensor.NumElements() != 0) {
mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer);
}
return output_tensor;
}
#else
using mkldnn::stream;
template <typename T> class MklDnnData;
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
const MklDnnShape& mkl_shape) {
Tensor output_tensor;
try {
if (!mkl_shape.IsMklTensor())
return mkl_tensor; // return input since it is already TF tensor
TensorShape output_shape = mkl_shape.GetTfShape();
// Allocate output tensor.
context->allocate_temp(DataTypeToEnum<T>::v(),
output_shape, &output_tensor);
auto cpu_engine = engine(engine::cpu, 0);
MklDnnData<T> input(&cpu_engine);
// Get Mkl layout of input tensor.
auto input_mkl_md = mkl_shape.GetMklLayout();
auto output_tf_md = mkl_shape.GetTfLayout();
auto output_tf_pd = memory::primitive_desc(output_tf_md, cpu_engine);
input.SetUsrMem(input_mkl_md, &mkl_tensor);
// reorder
if (input.IsReorderNeeded(output_tf_pd)) {
std::vector<primitive> net;
CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, &output_tensor, &net),
true);
stream(stream::kind::eager).submit(net).wait();
} else {
// If not, just forward input tensor to output tensor.
CHECK(output_tensor.CopyFrom(mkl_tensor, output_shape));
}
} catch (mkldnn::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + string(e.message) + ", in file " +
string(__FILE__) + ":" + std::to_string(__LINE__);
LOG(FATAL) << "Operation received an exception: " << error_msg;
}
return output_tensor;
}
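// Minimal usage sketch (hypothetical variable names, illustrative only):
//   MklDnnShape mkl_shape;
//   GetMklShape(context, 0, &mkl_shape);  // defined below
//   Tensor tf_tensor =
//       ConvertMklToTF<float>(context, MklGetInput(context, 0), mkl_shape);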
#endif
// Get the MKL shape from the second (serialized metadata) tensor
#ifdef INTEL_MKL_ML
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
mklshape->DeSerializeMklShape(
ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
.flat<uint8>()
.data(),
ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
.flat<uint8>()
.size() *
sizeof(uint8));
}
#else
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
mklshape->DeSerializeMklDnnShape(
ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
.flat<uint8>()
.data(),
ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
.flat<uint8>()
.size() *
sizeof(uint8));
}
#endif
// Gets the actual input
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
return ctext->input(GetTensorDataIndex(n, ctext->num_inputs()));
}
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
OpInputList* input_tensors) {
CHECK_NOTNULL(input_tensors);
ctext->input_list(name, input_tensors);
}
#ifdef INTEL_MKL_ML
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
MklShapeList* mkl_shapes) {
OpInputList input_mkl_tensors;
GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);
for (int i = 0; i < input_mkl_tensors.size(); i++) {
(*mkl_shapes)[i].DeSerializeMklShape(
input_mkl_tensors[i].flat<uint8>().data(),
input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
}
}
#else
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
MklDnnShapeList* mkl_shapes) {
OpInputList input_mkl_tensors;
GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);
for (int i = 0; i < input_mkl_tensors.size(); i++) {
(*mkl_shapes)[i].DeSerializeMklDnnShape(
input_mkl_tensors[i].flat<uint8>().data(),
input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
}
}
#endif
#ifndef INTEL_MKL_ML
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
// Sanity check.
CHECK_NOTNULL(context);
CHECK_LT(input_idx, context->num_inputs());
MklDnnShape input_mkl_shape;
GetMklShape(context, input_idx, &input_mkl_shape);
if (input_mkl_shape.IsMklTensor()) {
return input_mkl_shape.GetTfShape();
} else {
const Tensor& t = MklGetInput(context, input_idx);
return t.shape();
}
}
#endif
#ifdef INTEL_MKL_ML
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
const MklShape& mkl_shape) {
Tensor* second_tensor = nullptr;
TensorShape second_shape;
second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
OP_REQUIRES_OK(ctext, ctext->allocate_output(
GetTensorMetaDataIndex(n, ctext->num_outputs()),
second_shape, &second_tensor));
mkl_shape.SerializeMklShape(
second_tensor->flat<uint8>().data(),
second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
const MklDnnShape& mkl_shape) {
Tensor* second_tensor = nullptr;
TensorShape second_shape;
second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
OP_REQUIRES_OK(ctext, ctext->allocate_output(
GetTensorMetaDataIndex(n, ctext->num_outputs()),
second_shape, &second_tensor));
mkl_shape.SerializeMklDnnShape(
second_tensor->flat<uint8>().data(),
second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
#ifdef INTEL_MKL_ML
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
Tensor** output,
const TensorShape& tf_shape,
const MklShape& mkl_shape) {
Tensor* second_tensor = nullptr;
TensorShape second_shape;
second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
OP_REQUIRES_OK(
ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
tf_shape, output));
OP_REQUIRES_OK(ctext, ctext->allocate_output(
GetTensorMetaDataIndex(n, ctext->num_outputs()),
second_shape, &second_tensor));
mkl_shape.SerializeMklShape(
second_tensor->flat<uint8>().data(),
second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
Tensor** output,
const TensorShape& tf_shape,
const MklDnnShape& mkl_shape) {
Tensor* second_tensor = nullptr;
TensorShape second_shape;
second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
OP_REQUIRES_OK(
ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
tf_shape, output));
OP_REQUIRES_OK(ctext, ctext->allocate_output(
GetTensorMetaDataIndex(n, ctext->num_outputs()),
second_shape, &second_tensor));
mkl_shape.SerializeMklDnnShape(
second_tensor->flat<uint8>().data(),
second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
// Allocates a temp tensor and returns the data buffer for temporary storage.
#ifndef INTEL_MKL_ML
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
const memory::primitive_desc& pd, void** buf_out) {
TensorShape tf_shape;
tf_shape.AddDim(pd.get_size() / sizeof(T) + 1);
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
tf_shape, tensor_out));
*buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#else
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
dnnLayout_t lt_buff, void** buf_out) {
TensorShape tf_shape;
tf_shape.AddDim(
dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff)) /
sizeof(float) +
1);
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
tf_shape, tensor_out));
*buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}
#endif
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
TensorShape tf_shape) {
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
tf_shape, tensor_out));
}
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
const size_t* sizes) {
// MKL requires strides in NCHW
if (data_format == FORMAT_NHWC) {
strides[0] = sizes[2];
strides[1] = sizes[0] * sizes[2];
strides[2] = 1;
strides[3] = sizes[0] * sizes[1] * sizes[2];
} else {
strides[0] = 1;
strides[1] = sizes[0];
strides[2] = sizes[0] * sizes[1];
strides[3] = sizes[0] * sizes[1] * sizes[2];
}
}
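// Worked example (illustrative): with sizes indexed by MklDims, e.g.
// sizes = {W, H, C, N} = {5, 4, 3, 2}, FORMAT_NHWC produces
// strides = {C, W*C, 1, H*W*C} = {3, 15, 1, 60}, i.e. channels are the
// fastest-varying dimension in memory.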
#ifdef INTEL_MKL_ML
inline void MklSizesToTFSizes(OpKernelContext* context,
TensorFormat data_format_,
const MklShape& mkl_shape,
TensorShape* tf_shape) {
size_t tf_dim = mkl_shape.GetDimension();
const size_t* tf_sizes = mkl_shape.GetSizes();
OP_REQUIRES(context, tf_dim == 4,
errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));
std::vector<int32> sizes;
sizes.push_back(tf_sizes[3]);
if (data_format_ == FORMAT_NHWC) {
sizes.push_back(tf_sizes[1]);
sizes.push_back(tf_sizes[0]);
sizes.push_back(tf_sizes[2]);
} else {
sizes.push_back(tf_sizes[2]);
sizes.push_back(tf_sizes[1]);
sizes.push_back(tf_sizes[0]);
}
OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}
#endif
inline int32 GetMklTensorDimIndex(char dimension) {
switch (dimension) {
case 'N':
return MklDims::N;
case 'C':
return MklDims::C;
case 'H':
return MklDims::H;
case 'W':
return MklDims::W;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1; // Avoid compiler warning about missing return value
}
}
#ifdef INTEL_MKL_ML
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
int index = GetMklTensorDimIndex(dimension);
CHECK(index >= 0 && index < mkl_shape.GetDimension())
<< "Invalid index from the dimension: " << index << ", " << dimension;
return mkl_shape.dim_size(index);
}
#endif
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);
const Tensor& data = context->input(idx_data_in);
const Tensor& meta = context->input(idx_meta_in);
Tensor output(data.dtype());
Tensor meta_output(meta.dtype());
// TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
CHECK(output.CopyFrom(data, data.shape()));
CHECK(meta_output.CopyFrom(meta, meta.shape()));
context->set_output(idx_data_out, output);
context->set_output(idx_meta_out, meta_output);
}
#ifdef INTEL_MKL_ML
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
int idx_out,
const TensorShape& shape) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
const Tensor& data = context->input(idx_data_in);
MklShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
Tensor output(data.dtype());
// TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
CHECK(output.CopyFrom(data, shape));
context->set_output(idx_data_out, output);
}
#else
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
int idx_out,
const TensorShape& shape) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
const Tensor& data = context->input(idx_data_in);
MklDnnShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
Tensor output(data.dtype());
// TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
CHECK(output.CopyFrom(data, shape));
context->set_output(idx_data_out, output);
}
#endif
#ifdef INTEL_MKL_ML
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
MklShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
} else {
context->set_output(idx_data_out, context->input(idx_data_in));
}
}
#else
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
MklDnnShape dnn_shape_output;
dnn_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
} else {
context->set_output(idx_data_out, context->input(idx_data_in));
}
}
#endif
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
} else {
context->set_output(idx_data_out, context->input(idx_data_in));
context->set_output(idx_meta_out, context->input(idx_meta_in));
}
}
#ifndef INTEL_MKL_ML
// Set a dummy MKLDNN shape (called when the output is in TF format)
inline void SetDummyMklDnnShapeOutput(OpKernelContext* context,
uint32 idx_data_out) {
MklDnnShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
int idx_in, int idx_out,
const MklDnnShape& mkl_shape) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
AllocateOutputSetMklShape(context, idx_out, mkl_shape);
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
} else {
context->set_output(idx_data_out, context->input(idx_data_in));
}
}
#endif
// Forward the MKL shape ONLY (used in elementwise and other ops where
// we call the eigen implementation and MKL shape is not used)
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
uint32 idx_data_in,
uint32_t idx_data_out) {
uint32 idx_meta_in =
GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
uint32 idx_meta_out =
GetTensorMetaDataIndex(idx_data_out, context->num_outputs());
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
} else {
context->set_output(idx_meta_out, context->input(idx_meta_in));
}
}
#ifdef INTEL_MKL_ML
// Set a dummy MKL shape (called when the output is in TF format)
inline void SetDummyMklShapeOutput(OpKernelContext* context,
uint32 idx_data_out) {
MklShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}
// We don't need these functions in MKLDNN. We have defined equality operator
// on MklDnnShape class directly.
// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const MklShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const TensorShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->dims()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
const MklShape* input_shape_1) {
return MklCompareShapes(input_shape_1, input_shape_0);
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
const TensorShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->dims() != input_shape_1->dims()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->dims();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
const float* buf_in = input.flat<float>().data();
float* buf_out = (*output)->flat<float>().data();
int64 N = input.dim_size(0);
int64 H = input.dim_size(1);
int64 W = input.dim_size(2);
int64 C = input.dim_size(3);
int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
for (int64 n = 0; n < N; ++n) {
mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
buf_out + n * stride_n, H * W);
}
}
inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
const float* buf_in = input.flat<float>().data();
float* buf_out = (*output)->flat<float>().data();
int64 N = (*output)->dim_size(0);
int64 H = (*output)->dim_size(1);
int64 W = (*output)->dim_size(2);
int64 C = (*output)->dim_size(3);
int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
for (int64 n = 0; n < N; ++n) {
mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
buf_out + n * stride_n, C);
}
}
#endif
// -------------------------------------------------------------------
#ifndef INTEL_MKL_ML
/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
template <typename T>
static memory::data_type MklDnnType();
/// Instantiation for the float type. Add similar instantiations for other
/// types if needed.
template <>
memory::data_type MklDnnType<float>() {
return memory::data_type::f32;
}
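/// A minimal sketch of such an instantiation (added here for illustration
/// only; it assumes this MKL-DNN build exposes memory::data_type::s32):
///
///   template <>
///   memory::data_type MklDnnType<int32>() {
///     return memory::data_type::s32;
///   }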
/// Map TensorFlow's data format into MKL-DNN data format
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
/// Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
if (format == FORMAT_NHWC)
return memory::format::nhwc;
else if (format == FORMAT_NCHW)
return memory::format::nchw;
TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
// Return to get rid of compiler warning
return memory::format::format_undef;
}
/// Map MKL-DNN data format to TensorFlow's data format
///
/// @input: memory::format
/// @return: Tensorflow data format corresponding to memory::format
/// Fails with an error if invalid data format.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
if (format == memory::format::nhwc)
return FORMAT_NHWC;
else if (format == memory::format::nchw)
return FORMAT_NCHW;
TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
// Return to prevent compiler warnings, otherwise TF_CHECK_OK will ensure
// that we don't come here.
return FORMAT_NHWC;
}
/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function simply maps the input TensorShape into MKL-DNN dims,
/// preserving the order of dimensions. E.g., if the input tensor is in
/// NHWC format, then the dims will also be in NHWC order.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
memory::dims dims(shape.dims());
for (int d = 0; d < shape.dims(); ++d) {
dims[d] = shape.dim_size(d);
}
return dims;
}
/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// This function is a more specific version of the function above. It maps
/// the input TensorShape into MKL-DNN dims in NCHW format, so it may not
/// preserve the order of dimensions. E.g., if the input tensor is in NHWC
/// format, then the dims will be in NCHW format, not in NHWC format.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
TensorFormat format) {
// Check validity of format.
CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
memory::format::format_undef);
int n = shape.dim_size(GetTensorDimIndex(format, 'N'));
int c = shape.dim_size(GetTensorDimIndex(format, 'C'));
int h = shape.dim_size(GetTensorDimIndex(format, 'H'));
int w = shape.dim_size(GetTensorDimIndex(format, 'W'));
// MKL-DNN requires dimensions in NCHW format.
return memory::dims({n, c, h, w});
}
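/// Illustrative sketch (not part of the original header; the function name
/// is hypothetical): with an NHWC TensorShape {2, 5, 7, 3}
/// (N=2, H=5, W=7, C=3), the function above returns the dims in NCHW order.
inline memory::dims ExampleNhwcShapeToNchwDims() {
  TensorShape shape({2, 5, 7, 3});
  return TFShapeToMklDnnDimsInNCHW(shape, FORMAT_NHWC);  // {2, 3, 5, 7}
}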
/// Overloaded version of function above. Input parameters are
/// self-explanatory.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
TensorFormat format) {
// Check validity of format.
CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
memory::format::format_undef);
int n = in_dims[GetTensorDimIndex(format, 'N')];
int c = in_dims[GetTensorDimIndex(format, 'C')];
int h = in_dims[GetTensorDimIndex(format, 'H')];
int w = in_dims[GetTensorDimIndex(format, 'W')];
// MKL-DNN requires dimensions in NCHW format.
return memory::dims({n, c, h, w});
}
/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function simply maps the input shape in MKL-DNN memory::dims format
/// into Tensorflow's TensorShape object, preserving the dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
std::vector<int32> shape(dims.size(), -1);
for (int d = 0; d < dims.size(); d++) {
shape[d] = dims[d];
}
TensorShape ret;
CHECK_EQ(TensorShapeUtils::MakeShape(shape, &ret).ok(), true);
return ret;
}
/// Function to calculate strides given tensor shape in Tensorflow order
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
/// the dimension with size 1 is the outermost dimension, while the dimension
/// with size 4 is the innermost one. So the strides for this tensor would be
/// {4 * 3 * 2, 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
CHECK_GT(dims_tf_order.size(), 0);
memory::dims strides(dims_tf_order.size());
int last_dim_idx = dims_tf_order.size() - 1;
strides[last_dim_idx] = 1;
for (int d = last_dim_idx - 1; d >= 0; d--) {
strides[d] = strides[d + 1] * dims_tf_order[d + 1];
}
return strides;
}
inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
// MKL-DNN only supports zero padding.
return padding_kind::zero;
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
/// function such as CalculateTFStrides to compute strides
/// for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
/// for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype) {
CHECK_EQ(dim.size(), strides.size());
  // We have to construct the memory descriptor in C style. This is not ideal,
  // but MKLDNN does not offer any API to construct a descriptor in blocked
  // format other than a copy constructor that accepts mkldnn_memory_desc_t.
mkldnn_memory_desc_t md;
md.primitive_kind = mkldnn_memory;
md.ndims = dim.size();
md.format = mkldnn_blocked;
md.data_type = memory::convert_to_c(dtype);
for (size_t i = 0; i < dim.size(); i++) {
md.layout_desc.blocking.block_dims[i] = 1;
md.layout_desc.blocking.strides[1][i] = 1;
md.layout_desc.blocking.strides[0][i] = strides[i];
md.layout_desc.blocking.padding_dims[i] = dim[i];
md.layout_desc.blocking.offset_padding_to_data[i] = 0;
md.dims[i] = dim[i];
}
md.layout_desc.blocking.offset_padding = 0;
return memory::desc(md);
}
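/// A minimal usage sketch (added for illustration, not in the original
/// header; the function name is hypothetical): compute TF-order strides with
/// CalculateTFStrides and build a blocked memory descriptor with the helper
/// above.
inline memory::desc ExampleBlockedDescFromShape() {
  memory::dims dims = {8, 224, 224, 3};             // e.g. an NHWC tensor
  memory::dims strides = CalculateTFStrides(dims);  // {150528, 672, 3, 1}
  return CreateBlockedMemDescHelper(dims, strides, memory::data_type::f32);
}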
template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to);
/*
* Class to represent all the resources corresponding to a tensor in TensorFlow
* that are required to execute an operation (such as Convolution).
*/
template <typename T>
class MklDnnData {
private:
/// MKL-DNN memory primitive for input user memory
memory* user_memory_;
/// MKL-DNN memory primitive in case input or output reorder is needed.
memory* reorder_memory_;
/// Operations memory descriptor
memory::desc* op_md_;
/// CPU engine on which operation will be executed
const engine* cpu_engine_;
public:
explicit MklDnnData(const engine* e)
: user_memory_(nullptr),
reorder_memory_(nullptr),
op_md_(nullptr),
cpu_engine_(e) {}
~MklDnnData() {
cpu_engine_ = nullptr; // We don't own this.
delete (user_memory_);
delete (reorder_memory_);
delete (op_md_);
}
inline void* GetTensorBuffer(const Tensor* tensor) const {
CHECK_NOTNULL(tensor);
return const_cast<void*>(
static_cast<const void*>(tensor->flat<T>().data()));
}
  /// Set user memory primitive using specified dimensions, memory format and
  /// data_buffer. The function automatically derives the element data type
  /// from the input type T used to instantiate this object.
  ///
  /// In a nutshell, this function lets the user describe the input tensor to
  /// an operation. E.g., the filter of Conv2D has shape {1, 2, 3, 4} and
  /// memory format HWIO, and the buffer that contains the actual values is
  /// pointed to by data_buffer.
inline void SetUsrMem(const memory::dims& dim, memory::format fm,
void* data_buffer = nullptr) {
auto md = memory::desc(dim, MklDnnType<T>(), fm);
SetUsrMem(md, data_buffer);
}
inline void SetUsrMem(const memory::dims& dim, memory::format fm,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(dim, fm, GetTensorBuffer(tensor));
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
/// function such as CalculateTFStrides to compute strides
/// for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
/// for given dimensions and strides.
static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
const memory::dims& strides) {
return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
}
  /// A version of SetUsrMem that allows the user to create memory in blocked
  /// format. In addition to accepting dimensions, it also accepts strides.
  /// This lets the user create memory for a tensor in a format that is not
  /// natively supported by MKLDNN. E.g., MKLDNN has no native format for a
  /// 6-dimensional tensor, but by using the blocked format a user can still
  /// create memory for a 6D tensor.
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
void* data_buffer = nullptr) {
CHECK_EQ(dim.size(), strides.size());
auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
SetUsrMem(blocked_md, data_buffer);
}
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(dim, strides, GetTensorBuffer(tensor));
}
  /// A version of the function to set the user memory primitive that accepts
  /// a memory descriptor directly, instead of dimensions and format. This
  /// function is more generic than the one above, but the function above is
  /// sufficient in most cases.
inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
auto pd = memory::primitive_desc(md, *cpu_engine_);
SetUsrMem(pd, data_buffer);
}
/// A version of SetUsrMem with memory descriptor and tensor
inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(md, GetTensorBuffer(tensor));
}
  /// A version of the function to set the user memory primitive that accepts
  /// a primitive descriptor directly, instead of dimensions and format. This
  /// function is more generic than the one above, but the function above is
  /// sufficient in most cases.
inline void SetUsrMem(const memory::primitive_desc& pd,
void* data_buffer = nullptr) {
CHECK_NOTNULL(cpu_engine_);
// TODO(nhasabni): can we remove dynamic memory allocation?
if (data_buffer) {
user_memory_ = new memory(pd, data_buffer);
} else {
user_memory_ = new memory(pd);
}
}
/// A version of SetUsrMem with primitive descriptor and tensor
inline void SetUsrMem(const memory::primitive_desc& pd,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(pd, GetTensorBuffer(tensor));
}
/// Get function for user memory primitive.
inline const memory* GetUsrMem() const { return user_memory_; }
/// Get function for primitive descriptor of user memory primitive.
inline const memory::primitive_desc GetUsrMemPrimDesc() const {
CHECK_NOTNULL(user_memory_);
return user_memory_->get_primitive_desc();
}
/// Get function for descriptor of user memory.
inline memory::desc GetUsrMemDesc() {
    // This is ugly: why does MKL-DNN not provide a const desc() method?
const memory::primitive_desc pd = GetUsrMemPrimDesc();
return const_cast<memory::primitive_desc*>(&pd)->desc();
}
/// Get function for data buffer of user memory primitive.
inline void* GetUsrMemDataHandle() const {
CHECK_NOTNULL(user_memory_);
return user_memory_->get_data_handle();
}
/// Set function for data buffer of user memory primitive.
inline void SetUsrMemDataHandle(void* data_buffer) {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(data_buffer);
user_memory_->set_data_handle(data_buffer);
}
/// Set function for data buffer of user memory primitive.
inline void SetUsrMemDataHandle(const Tensor* tensor) {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(tensor);
user_memory_->set_data_handle(GetTensorBuffer(tensor));
}
/// Get the memory primitive for input and output of an op. If inputs
/// to an op require reorders, then this function returns memory primitive
/// for reorder. Otherwise, it will return memory primitive for user memory.
///
  /// E.g., Conv2D(I, F) is a primitive with I and F as inputs. To execute
  /// Conv2D, we need memory primitives for I and F. But if a reorder is
  /// required for I and F (say I_r is the reorder primitive for I and F_r is
  /// the reorder primitive for F), then we need I_r and F_r to perform Conv2D.
inline const memory& GetOpMem() const {
return reorder_memory_ ? *reorder_memory_ : *user_memory_;
}
  /// Set the memory descriptor of an operation in terms of dimensions and
  /// memory format. E.g., for Conv2D, the dimensions would be the same as the
  /// user dimensions, but memory::format would be mkldnn::any because we want
  /// MKL-DNN to choose the best layout/format for the given input dimensions.
inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
// TODO(nhasabni): can we remove dynamic memory allocation?
op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
}
/// Get function for memory descriptor for an operation
inline const memory::desc& GetOpMemDesc() const { return *op_md_; }
/// Predicate that checks if we need to reorder user's memory into memory
/// pointed by op_pd.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
CHECK_NOTNULL(user_memory_);
return op_pd != user_memory_->get_primitive_desc();
}
/// Predicate that checks if we need to reorder user's memory into memory
/// based on the provided format.
///
/// @input: target_format - memory format of the given input of an
/// operation
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool IsReorderNeeded(const memory::format& target_format) const {
CHECK_NOTNULL(user_memory_);
return target_format !=
user_memory_->get_primitive_desc().desc().data.format;
}
  /// Function to create a reorder from the memory pointed to by 'from' to the
  /// memory pointed to by 'to'. Returns the created primitive.
inline primitive CreateReorder(const memory* from, const memory* to) const {
CHECK_NOTNULL(from);
CHECK_NOTNULL(to);
return reorder(*from, *to);
}
/// Function to handle input reordering
///
/// Check if we need to reorder this input of an operation.
/// Return true and allocate reorder memory primitive if reorder is needed.
/// Otherwise, return false and do not allocate reorder memory primitive.
///
/// To check if reorder is needed, this function compares memory primitive
/// descriptor of an operation (op_pd) for the given input with the
/// user-specified memory primitive descriptor.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
reorder_memory_ = new memory(op_pd);
net->push_back(CreateReorder(user_memory_, reorder_memory_));
return true;
}
return false;
}
  /// TODO: This is a faster path that uses the reorder primitive cache,
  /// compared with CheckReorderToOpMem(..., std::vector<primitive>* net).
  /// The slow path will be removed in the future.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd) {
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
      // Primitive reuse does not allow two identical reorder primitives in
      // one stream, so submit this one immediately.
reorder_memory_ = new memory(op_pd);
std::vector<primitive> net;
net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
stream(stream::kind::eager).submit(net).wait();
return true;
}
return false;
}
/// Overloaded version of above function that accepts memory buffer
/// where output of reorder needs to be stored.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
  /// @reorder_data_handle - memory buffer where the output of the reorder is
  ///                        to be stored. The primitive does not check whether
  ///                        the buffer is large enough to write to.
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
void* reorder_data_handle,
std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(reorder_data_handle);
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
reorder_memory_ = new memory(op_pd, reorder_data_handle);
net->push_back(CreateReorder(user_memory_, reorder_memory_));
return true;
}
return false;
}
  /// TODO: This is a faster path that uses the reorder primitive cache,
  /// compared with CheckReorderToOpMem(..., std::vector<primitive>* net).
  /// The slow path will be removed in the future.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
void* reorder_data_handle) {
CHECK_NOTNULL(reorder_data_handle);
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
      // Primitive reuse does not allow two identical reorder primitives in
      // one stream, so submit this one immediately.
std::vector<primitive> net;
reorder_memory_ = new memory(op_pd, reorder_data_handle);
net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
stream(stream::kind::eager).submit(net).wait();
return true;
}
return false;
}
/// Another overloaded version of CheckReorderToOpMem that accepts Tensor
/// where output of reorder needs to be stored.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
  /// @reorder_tensor - Tensor whose buffer is to be used to store the output
  ///                   of the reorder. The primitive does not check whether
  ///                   the buffer is large enough to write to.
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
Tensor* reorder_tensor,
std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(reorder_tensor);
return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net);
}
  /// TODO: This is a faster path that uses the reorder primitive cache,
  /// compared with CheckReorderToOpMem(..., std::vector<primitive>* net).
  /// The slow path will be removed in the future.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
Tensor* reorder_tensor) {
CHECK_NOTNULL(reorder_tensor);
return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor));
}
/// Function to handle output reorder
///
  /// This function performs functionality very similar to the input
  /// reordering function above. The only difference is that this function
  /// does not add the reorder primitive to the net: the reorder primitive for
  /// the output must be added to the list only after the operation has
  /// executed. However, we still need to prepare a temporary buffer in case
  /// an output reorder is needed; this temporary buffer will hold the output
  /// of the operation before it is fed to the reorder primitive.
///
/// @input memory primitive descriptor for the given output of an operation
/// @return: true in case reorder of output is needed; false, otherwise.
inline bool PrepareReorderToUserMemIfReq(
const memory::primitive_desc& op_pd) {
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
reorder_memory_ = new memory(op_pd);
return true;
}
return false;
}
/// Function to actually insert reorder primitive in the net
///
/// This function completes remaining part of output reordering. It inserts
/// a reordering primitive from the temporary buffer that holds the output
/// to the user-specified output buffer.
///
/// @input: net - net to which to add reorder primitive
inline void InsertReorderToUserMem(std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(reorder_memory_);
net->push_back(CreateReorder(reorder_memory_, user_memory_));
}
  /// TODO: This is a faster path that uses the reorder primitive cache,
  /// compared with InsertReorderToUserMem(std::vector<primitive>* net).
  /// The slow path will be removed in the future.
inline void InsertReorderToUserMem() {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(reorder_memory_);
    // Primitive reuse does not allow two identical reorder primitives in
    // one stream, so submit this one immediately.
std::vector<primitive> net;
net.push_back(FindOrCreateReorder<T>(reorder_memory_, user_memory_));
stream(stream::kind::eager).submit(net).wait();
}
};
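/// A condensed usage sketch of the input-reorder workflow described above
/// (illustrative only; the function name is hypothetical): describe the
/// user's tensor, append a reorder to the net if the op expects a different
/// layout, and hand the op whichever memory is now valid.
template <typename T>
const memory& ExamplePrepareOpInput(MklDnnData<T>* src,
                                    const memory::dims& dims,
                                    memory::format fm, const Tensor* tensor,
                                    const memory::primitive_desc& op_pd,
                                    std::vector<primitive>* net) {
  src->SetUsrMem(dims, fm, tensor);      // describe the user's input tensor
  src->CheckReorderToOpMem(op_pd, net);  // appends a reorder if layouts differ
  return src->GetOpMem();                // reorder memory, or user memory
}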
/// Base class for operations with reuse of primitives
///
class MklPrimitive {
public:
virtual ~MklPrimitive() {}
// Dummy data. Its size, hard-coded as 256 here, does
// not matter since MKL should never operate on this buffer.
unsigned char DummyData[256];
};
const mkldnn::memory::dims NONE_DIMS = {};
template <typename T>
class MklPrimitiveFactory {
public:
MklPrimitiveFactory() {}
~MklPrimitiveFactory() {}
MklPrimitive* GetOp(const std::string& key) {
auto stream_iter = MklPrimitiveFactory<T>::GetHashMap().find(key);
if (stream_iter == MklPrimitiveFactory<T>::GetHashMap().end()) {
return nullptr;
} else {
return stream_iter->second;
}
}
void SetOp(const std::string& key, MklPrimitive* op) {
auto stream_iter = MklPrimitiveFactory<T>::GetHashMap().find(key);
CHECK(stream_iter == MklPrimitiveFactory<T>::GetHashMap().end());
MklPrimitiveFactory<T>::GetHashMap()[key] = op;
}
private:
static inline std::unordered_map<std::string, MklPrimitive*>& GetHashMap() {
static thread_local std::unordered_map<std::string, MklPrimitive*> map_;
return map_;
}
};
// Utility class for creating keys for the MKL primitive pool.
class FactoryKeyCreator {
public:
FactoryKeyCreator() {
key_.reserve(kMaxKeyLength);
}
~FactoryKeyCreator() {}
void AddAsKey(const string& str) { Append(str); }
void AddAsKey(const mkldnn::memory::dims &dims) {
for (unsigned int i = 0; i < dims.size(); i++) {
AddAsKey<int>(dims[i]);
}
}
template <typename T>
void AddAsKey(const T data) {
auto buffer = reinterpret_cast<const char *>(&data);
Append(StringPiece(buffer, sizeof(T)));
}
std::string GetKey() {
return key_;
}
private:
string key_;
const char delimiter = 'x';
const int kMaxKeyLength = 256;
void Append(StringPiece s) {
key_.append(s.ToString());
key_.append(1, delimiter);
}
};
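// Illustrative sketch (not in the original source; the function name is
// hypothetical) of how a pool key is assembled; CreateKey in
// MklReorderPrimitiveFactory below follows the same pattern of appending
// 'x'-delimited typed fields.
inline std::string ExampleMakeFactoryKey(const mkldnn::memory::dims& dims) {
  FactoryKeyCreator key_creator;
  std::string prefix = "example";
  key_creator.AddAsKey(prefix);   // string field
  key_creator.AddAsKey(dims);     // each dim is appended as raw int bytes
  key_creator.AddAsKey<int>(42);  // e.g. a format or data-type enum value
  return key_creator.GetKey();
}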
class MklReorderPrimitive : public MklPrimitive {
public:
explicit MklReorderPrimitive(const memory* from, const memory* to) {
Setup(from, to);
}
~MklReorderPrimitive() {}
std::shared_ptr<primitive> GetPrimitive() {
return context_.reorder_prim;
}
void SetMemory(const memory* from, const memory* to) {
context_.src_mem->set_data_handle(from->get_data_handle());
context_.dst_mem->set_data_handle(to->get_data_handle());
}
private:
struct ReorderContext {
std::shared_ptr<mkldnn::memory> src_mem;
std::shared_ptr<mkldnn::memory> dst_mem;
std::shared_ptr<primitive> reorder_prim;
ReorderContext():
src_mem(nullptr), dst_mem(nullptr), reorder_prim(nullptr) {
}
} context_;
engine cpu_engine_ = engine(engine::cpu, 0);
void Setup(const memory* from, const memory* to) {
context_.src_mem.reset(new memory(
{from->get_primitive_desc().desc(), cpu_engine_}, DummyData));
context_.dst_mem.reset(new memory(
{to->get_primitive_desc().desc(), cpu_engine_}, DummyData));
context_.reorder_prim = std::make_shared<mkldnn::reorder>(
reorder(*context_.src_mem, *context_.dst_mem));
}
};
template <typename T>
class MklReorderPrimitiveFactory : public MklPrimitiveFactory<T> {
public:
static MklReorderPrimitive* Get(const memory* from,
const memory* to) {
auto reorderPrim = static_cast<MklReorderPrimitive*>(
MklReorderPrimitiveFactory<T>::GetInstance().GetReorder(from, to));
if (reorderPrim == nullptr) {
reorderPrim = new MklReorderPrimitive(from, to);
MklReorderPrimitiveFactory<T>::GetInstance().SetReorder(
from, to, reorderPrim);
}
reorderPrim->SetMemory(from, to);
return reorderPrim;
}
static MklReorderPrimitiveFactory & GetInstance() {
static MklReorderPrimitiveFactory instance_;
return instance_;
}
private:
  MklReorderPrimitiveFactory() {}
  ~MklReorderPrimitiveFactory() {}
static std::string CreateKey(const memory* from, const memory* to) {
std::string prefix = "reorder";
FactoryKeyCreator key_creator;
auto const &from_desc = from->get_primitive_desc().desc().data;
auto const &to_desc = to->get_primitive_desc().desc().data;
memory::dims from_dims(from_desc.dims, &from_desc.dims[from_desc.ndims]);
memory::dims to_dims(to_desc.dims, &to_desc.dims[to_desc.ndims]);
key_creator.AddAsKey(prefix);
key_creator.AddAsKey(static_cast<int>(from_desc.format));
key_creator.AddAsKey(static_cast<int>(from_desc.data_type));
key_creator.AddAsKey(from_dims);
key_creator.AddAsKey(static_cast<int>(to_desc.format));
key_creator.AddAsKey(static_cast<int>(to_desc.data_type));
key_creator.AddAsKey(to_dims);
return key_creator.GetKey();
}
MklPrimitive* GetReorder(const memory* from, const memory* to) {
std::string key = CreateKey(from, to);
return this->GetOp(key);
}
void SetReorder(const memory* from, const memory* to, MklPrimitive* op) {
std::string key = CreateKey(from, to);
this->SetOp(key, op);
}
};
/// Function to find (or create) a reorder from the memory pointed to by
/// 'from' to the memory pointed to by 'to'. It creates the primitive, or
/// fetches it from the pool if it is cached. Returns the primitive.
template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to) {
CHECK_NOTNULL(from);
CHECK_NOTNULL(to);
MklReorderPrimitive *reorder_prim =
MklReorderPrimitiveFactory<T>::Get(from, to);
return *reorder_prim->GetPrimitive();
}
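/// Minimal usage sketch (illustrative only; the function name is
/// hypothetical): run a cached reorder between two memory primitives, the
/// same way the fast CheckReorderToOpMem paths above do.
template <typename T>
inline void ExampleRunCachedReorder(const memory* from, const memory* to) {
  std::vector<primitive> net;
  net.push_back(FindOrCreateReorder<T>(from, to));
  stream(stream::kind::eager).submit(net).wait();
}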
#endif // INTEL_MKL_ML
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
|
convolution_1x1_pack8to4_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_transform_kernel_pack8to4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to4, int inch, int outch)
{
// interleave
// src = inch-outch
// dst = 4b-8a-inch/8a-outch/4
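    // i.e. for each block of 8 output channels (tails handled in blocks of 4),
    // and for each group of 8 input channels, the 8 (or 4) output-channel
    // weights sharing one input channel are stored contiguously, matching the
    // ld1/fmla access pattern of the sgemm kernels below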
kernel_tm_pack8to4.create(4 * 8, inch / 8, outch / 8 + (outch % 8) / 4, (size_t)2u * 2, 2);
int p = 0;
for (; p + 7 < outch; p += 8)
{
const float* k0 = (const float*)kernel + (p + 0) * inch;
const float* k1 = (const float*)kernel + (p + 1) * inch;
const float* k2 = (const float*)kernel + (p + 2) * inch;
const float* k3 = (const float*)kernel + (p + 3) * inch;
const float* k4 = (const float*)kernel + (p + 4) * inch;
const float* k5 = (const float*)kernel + (p + 5) * inch;
const float* k6 = (const float*)kernel + (p + 6) * inch;
const float* k7 = (const float*)kernel + (p + 7) * inch;
__fp16* g0 = kernel_tm_pack8to4.channel(p / 8);
for (int q = 0; q + 7 < inch; q += 8)
{
for (int i = 0; i < 8; i++)
{
g0[0] = (__fp16)k0[i];
g0[1] = (__fp16)k1[i];
g0[2] = (__fp16)k2[i];
g0[3] = (__fp16)k3[i];
g0[4] = (__fp16)k4[i];
g0[5] = (__fp16)k5[i];
g0[6] = (__fp16)k6[i];
g0[7] = (__fp16)k7[i];
g0 += 8;
}
k0 += 8;
k1 += 8;
k2 += 8;
k3 += 8;
k4 += 8;
k5 += 8;
k6 += 8;
k7 += 8;
}
}
for (; p + 3 < outch; p += 4)
{
const float* k0 = (const float*)kernel + (p + 0) * inch;
const float* k1 = (const float*)kernel + (p + 1) * inch;
const float* k2 = (const float*)kernel + (p + 2) * inch;
const float* k3 = (const float*)kernel + (p + 3) * inch;
__fp16* g0 = kernel_tm_pack8to4.channel(p / 8 + (p % 8) / 4);
for (int q = 0; q + 7 < inch; q += 8)
{
for (int i = 0; i < 8; i++)
{
g0[0] = (__fp16)k0[i];
g0[1] = (__fp16)k1[i];
g0[2] = (__fp16)k2[i];
g0[3] = (__fp16)k3[i];
g0 += 4;
}
k0 += 8;
k1 += 8;
k2 += 8;
k3 += 8;
}
}
}
static void conv1x1s1_sgemm_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const __fp16* bias = _bias;
// interleave
Mat tmp;
if (size >= 8)
tmp.create(8, inch, size / 8 + (size % 8) / 4 + size % 4, elemsize, elempack, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4, inch, size / 4 + size % 4, elemsize, elempack, opt.workspace_allocator);
else // if (size >= 1)
tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
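    // tmp gathers input pixels into tiles of 8, then 4, then 1 column(s), so
    // each micro-kernel below can stream its tile from one contiguous channel
    // at tmp.channel(i / 8 + (i % 8) / 4 + i % 4)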
{
int nn_size;
int remain_size_start = 0;
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const __fp16* img0 = bottom_blob.channel(0);
img0 += i * 8;
__fp16* tmpptr = tmp.channel(i / 8);
for (int q = 0; q < inch; q++)
{
// transpose 8x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
"sub %0, %0, #64 \n"
"uzp1 v16.8h, v0.8h, v4.8h \n"
"uzp2 v20.8h, v0.8h, v4.8h \n"
"uzp1 v17.8h, v1.8h, v5.8h \n"
"uzp2 v21.8h, v1.8h, v5.8h \n"
"uzp1 v18.8h, v2.8h, v6.8h \n"
"uzp2 v22.8h, v2.8h, v6.8h \n"
"uzp1 v19.8h, v3.8h, v7.8h \n"
"uzp2 v23.8h, v3.8h, v7.8h \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const __fp16* img0 = bottom_blob.channel(0);
img0 += i * 8;
__fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
for (int q = 0; q < inch; q++)
{
// transpose 8x4
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const __fp16* img0 = bottom_blob.channel(0);
img0 += i * 8;
__fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
img0 += bottom_blob.cstep * 8;
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
__fp16* outptr0 = top_blob.channel(p);
__fp16* outptr1 = top_blob.channel(p + 1);
const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const __fp16* biasptr = bias ? bias + p : zeros;
float16x8_t _bias0 = vld1q_f16(biasptr);
int i = 0;
for (; i + 7 < size; i += 8)
{
__fp16* tmpptr = tmp.channel(i / 8);
const __fp16* kptr = kernel.channel(p / 2);
int nn = inch; // inch always > 0
asm volatile(
"mov v24.16b, %10.16b \n"
"mov v25.16b, %10.16b \n"
"mov v26.16b, %10.16b \n"
"mov v27.16b, %10.16b \n"
"mov v28.16b, %10.16b \n"
"mov v29.16b, %10.16b \n"
"mov v30.16b, %10.16b \n"
"mov v31.16b, %10.16b \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n"
"fmla v24.8h, v16.8h, v0.h[0] \n"
"fmla v25.8h, v16.8h, v0.h[1] \n"
"fmla v26.8h, v16.8h, v0.h[2] \n"
"fmla v27.8h, v16.8h, v0.h[3] \n"
"fmla v28.8h, v16.8h, v0.h[4] \n"
"fmla v29.8h, v16.8h, v0.h[5] \n"
"fmla v30.8h, v16.8h, v0.h[6] \n"
"fmla v31.8h, v16.8h, v0.h[7] \n"
"fmla v24.8h, v17.8h, v1.h[0] \n"
"fmla v25.8h, v17.8h, v1.h[1] \n"
"fmla v26.8h, v17.8h, v1.h[2] \n"
"fmla v27.8h, v17.8h, v1.h[3] \n"
"fmla v28.8h, v17.8h, v1.h[4] \n"
"fmla v29.8h, v17.8h, v1.h[5] \n"
"fmla v30.8h, v17.8h, v1.h[6] \n"
"fmla v31.8h, v17.8h, v1.h[7] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v24.8h, v18.8h, v2.h[0] \n"
"fmla v25.8h, v18.8h, v2.h[1] \n"
"fmla v26.8h, v18.8h, v2.h[2] \n"
"fmla v27.8h, v18.8h, v2.h[3] \n"
"fmla v28.8h, v18.8h, v2.h[4] \n"
"fmla v29.8h, v18.8h, v2.h[5] \n"
"fmla v30.8h, v18.8h, v2.h[6] \n"
"fmla v31.8h, v18.8h, v2.h[7] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n"
"fmla v24.8h, v19.8h, v3.h[0] \n"
"fmla v25.8h, v19.8h, v3.h[1] \n"
"fmla v26.8h, v19.8h, v3.h[2] \n"
"fmla v27.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v19.8h, v3.h[4] \n"
"fmla v29.8h, v19.8h, v3.h[5] \n"
"fmla v30.8h, v19.8h, v3.h[6] \n"
"fmla v31.8h, v19.8h, v3.h[7] \n"
"fmla v24.8h, v20.8h, v4.h[0] \n"
"fmla v25.8h, v20.8h, v4.h[1] \n"
"fmla v26.8h, v20.8h, v4.h[2] \n"
"fmla v27.8h, v20.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[5] \n"
"fmla v30.8h, v20.8h, v4.h[6] \n"
"fmla v31.8h, v20.8h, v4.h[7] \n"
"fmla v24.8h, v21.8h, v5.h[0] \n"
"fmla v25.8h, v21.8h, v5.h[1] \n"
"fmla v26.8h, v21.8h, v5.h[2] \n"
"fmla v27.8h, v21.8h, v5.h[3] \n"
"fmla v28.8h, v21.8h, v5.h[4] \n"
"fmla v29.8h, v21.8h, v5.h[5] \n"
"fmla v30.8h, v21.8h, v5.h[6] \n"
"fmla v31.8h, v21.8h, v5.h[7] \n"
"fmla v24.8h, v22.8h, v6.h[0] \n"
"fmla v25.8h, v22.8h, v6.h[1] \n"
"fmla v26.8h, v22.8h, v6.h[2] \n"
"fmla v27.8h, v22.8h, v6.h[3] \n"
"fmla v28.8h, v22.8h, v6.h[4] \n"
"fmla v29.8h, v22.8h, v6.h[5] \n"
"fmla v30.8h, v22.8h, v6.h[6] \n"
"fmla v31.8h, v22.8h, v6.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.8h, v23.8h, v7.h[0] \n"
"fmla v25.8h, v23.8h, v7.h[1] \n"
"fmla v26.8h, v23.8h, v7.h[2] \n"
"fmla v27.8h, v23.8h, v7.h[3] \n"
"fmla v28.8h, v23.8h, v7.h[4] \n"
"fmla v29.8h, v23.8h, v7.h[5] \n"
"fmla v30.8h, v23.8h, v7.h[6] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
"ext v24.16b, v24.16b, v24.16b, #8 \n"
"ext v25.16b, v25.16b, v25.16b, #8 \n"
"ext v26.16b, v26.16b, v26.16b, #8 \n"
"ext v27.16b, v27.16b, v27.16b, #8 \n"
"ext v28.16b, v28.16b, v28.16b, #8 \n"
"ext v29.16b, v29.16b, v29.16b, #8 \n"
"ext v30.16b, v30.16b, v30.16b, #8 \n"
"ext v31.16b, v31.16b, v31.16b, #8 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr),
"w"(_bias0) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < size; i += 4)
{
__fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const __fp16* kptr = kernel.channel(p / 2);
int nn = inch; // inch always > 0
asm volatile(
"mov v24.16b, %10.16b \n"
"mov v25.16b, %10.16b \n"
"mov v26.16b, %10.16b \n"
"mov v27.16b, %10.16b \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n"
"fmla v24.8h, v16.8h, v0.h[0] \n"
"fmla v25.8h, v16.8h, v0.h[1] \n"
"fmla v26.8h, v16.8h, v0.h[2] \n"
"fmla v27.8h, v16.8h, v0.h[3] \n"
"fmla v24.8h, v17.8h, v0.h[4] \n"
"fmla v25.8h, v17.8h, v0.h[5] \n"
"fmla v26.8h, v17.8h, v0.h[6] \n"
"fmla v27.8h, v17.8h, v0.h[7] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v24.8h, v18.8h, v1.h[0] \n"
"fmla v25.8h, v18.8h, v1.h[1] \n"
"fmla v26.8h, v18.8h, v1.h[2] \n"
"fmla v27.8h, v18.8h, v1.h[3] \n"
"fmla v24.8h, v19.8h, v1.h[4] \n"
"fmla v25.8h, v19.8h, v1.h[5] \n"
"fmla v26.8h, v19.8h, v1.h[6] \n"
"fmla v27.8h, v19.8h, v1.h[7] \n"
"fmla v24.8h, v20.8h, v2.h[0] \n"
"fmla v25.8h, v20.8h, v2.h[1] \n"
"fmla v26.8h, v20.8h, v2.h[2] \n"
"fmla v27.8h, v20.8h, v2.h[3] \n"
"fmla v24.8h, v21.8h, v2.h[4] \n"
"fmla v25.8h, v21.8h, v2.h[5] \n"
"fmla v26.8h, v21.8h, v2.h[6] \n"
"fmla v27.8h, v21.8h, v2.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.8h, v22.8h, v3.h[0] \n"
"fmla v25.8h, v22.8h, v3.h[1] \n"
"fmla v26.8h, v22.8h, v3.h[2] \n"
"fmla v27.8h, v22.8h, v3.h[3] \n"
"fmla v24.8h, v23.8h, v3.h[4] \n"
"fmla v25.8h, v23.8h, v3.h[5] \n"
"fmla v26.8h, v23.8h, v3.h[6] \n"
"fmla v27.8h, v23.8h, v3.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"ext v24.16b, v24.16b, v24.16b, #8 \n"
"ext v25.16b, v25.16b, v25.16b, #8 \n"
"ext v26.16b, v26.16b, v26.16b, #8 \n"
"ext v27.16b, v27.16b, v27.16b, #8 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr),
"w"(_bias0) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
for (; i < size; i++)
{
__fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* kptr = kernel.channel(p / 2);
float16x8_t _sum0 = _bias0;
for (int q = 0; q < inch; q++)
{
float16x8_t _r0 = vld1q_f16(tmpptr);
float16x8_t _k0 = vld1q_f16(kptr);
float16x8_t _k1 = vld1q_f16(kptr + 8);
float16x8_t _k2 = vld1q_f16(kptr + 16);
float16x8_t _k3 = vld1q_f16(kptr + 24);
float16x8_t _k4 = vld1q_f16(kptr + 32);
float16x8_t _k5 = vld1q_f16(kptr + 40);
float16x8_t _k6 = vld1q_f16(kptr + 48);
float16x8_t _k7 = vld1q_f16(kptr + 56);
_sum0 = vfmaq_laneq_f16(_sum0, _k0, _r0, 0);
_sum0 = vfmaq_laneq_f16(_sum0, _k1, _r0, 1);
_sum0 = vfmaq_laneq_f16(_sum0, _k2, _r0, 2);
_sum0 = vfmaq_laneq_f16(_sum0, _k3, _r0, 3);
_sum0 = vfmaq_laneq_f16(_sum0, _k4, _r0, 4);
_sum0 = vfmaq_laneq_f16(_sum0, _k5, _r0, 5);
_sum0 = vfmaq_laneq_f16(_sum0, _k6, _r0, 6);
_sum0 = vfmaq_laneq_f16(_sum0, _k7, _r0, 7);
kptr += 64;
tmpptr += 8;
}
vst1_f16(outptr0, vget_low_f16(_sum0));
vst1_f16(outptr1, vget_high_f16(_sum0));
outptr0 += 4;
outptr1 += 4;
}
}
remain_outch_start += nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
__fp16* outptr0 = top_blob.channel(p);
const __fp16 zeros[4] = {0.f, 0.f, 0.f, 0.f};
const __fp16* biasptr = bias ? bias + p * 4 : zeros;
float16x4_t _bias0 = vld1_f16(biasptr);
int i = 0;
for (; i + 7 < size; i += 8)
{
__fp16* tmpptr = tmp.channel(i / 8);
const __fp16* kptr = kernel.channel(p / 2 + p % 2);
int nn = inch; // inch always > 0
asm volatile(
"mov v24.16b, %8.16b \n"
"mov v25.16b, %8.16b \n"
"mov v26.16b, %8.16b \n"
"mov v27.16b, %8.16b \n"
"mov v28.16b, %8.16b \n"
"mov v29.16b, %8.16b \n"
"mov v30.16b, %8.16b \n"
"mov v31.16b, %8.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n"
"fmla v24.4h, v16.4h, v0.h[0] \n"
"fmla v25.4h, v16.4h, v0.h[1] \n"
"fmla v26.4h, v16.4h, v0.h[2] \n"
"fmla v27.4h, v16.4h, v0.h[3] \n"
"fmla v28.4h, v16.4h, v0.h[4] \n"
"fmla v29.4h, v16.4h, v0.h[5] \n"
"fmla v30.4h, v16.4h, v0.h[6] \n"
"fmla v31.4h, v16.4h, v0.h[7] \n"
"fmla v24.4h, v17.4h, v1.h[0] \n"
"fmla v25.4h, v17.4h, v1.h[1] \n"
"fmla v26.4h, v17.4h, v1.h[2] \n"
"fmla v27.4h, v17.4h, v1.h[3] \n"
"fmla v28.4h, v17.4h, v1.h[4] \n"
"fmla v29.4h, v17.4h, v1.h[5] \n"
"fmla v30.4h, v17.4h, v1.h[6] \n"
"fmla v31.4h, v17.4h, v1.h[7] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n"
"fmla v24.4h, v18.4h, v2.h[0] \n"
"fmla v25.4h, v18.4h, v2.h[1] \n"
"fmla v26.4h, v18.4h, v2.h[2] \n"
"fmla v27.4h, v18.4h, v2.h[3] \n"
"fmla v28.4h, v18.4h, v2.h[4] \n"
"fmla v29.4h, v18.4h, v2.h[5] \n"
"fmla v30.4h, v18.4h, v2.h[6] \n"
"fmla v31.4h, v18.4h, v2.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n"
"fmla v24.4h, v19.4h, v3.h[0] \n"
"fmla v25.4h, v19.4h, v3.h[1] \n"
"fmla v26.4h, v19.4h, v3.h[2] \n"
"fmla v27.4h, v19.4h, v3.h[3] \n"
"fmla v28.4h, v19.4h, v3.h[4] \n"
"fmla v29.4h, v19.4h, v3.h[5] \n"
"fmla v30.4h, v19.4h, v3.h[6] \n"
"fmla v31.4h, v19.4h, v3.h[7] \n"
"fmla v24.4h, v20.4h, v4.h[0] \n"
"fmla v25.4h, v20.4h, v4.h[1] \n"
"fmla v26.4h, v20.4h, v4.h[2] \n"
"fmla v27.4h, v20.4h, v4.h[3] \n"
"fmla v28.4h, v20.4h, v4.h[4] \n"
"fmla v29.4h, v20.4h, v4.h[5] \n"
"fmla v30.4h, v20.4h, v4.h[6] \n"
"fmla v31.4h, v20.4h, v4.h[7] \n"
"fmla v24.4h, v21.4h, v5.h[0] \n"
"fmla v25.4h, v21.4h, v5.h[1] \n"
"fmla v26.4h, v21.4h, v5.h[2] \n"
"fmla v27.4h, v21.4h, v5.h[3] \n"
"fmla v28.4h, v21.4h, v5.h[4] \n"
"fmla v29.4h, v21.4h, v5.h[5] \n"
"fmla v30.4h, v21.4h, v5.h[6] \n"
"fmla v31.4h, v21.4h, v5.h[7] \n"
"fmla v24.4h, v22.4h, v6.h[0] \n"
"fmla v25.4h, v22.4h, v6.h[1] \n"
"fmla v26.4h, v22.4h, v6.h[2] \n"
"fmla v27.4h, v22.4h, v6.h[3] \n"
"fmla v28.4h, v22.4h, v6.h[4] \n"
"fmla v29.4h, v22.4h, v6.h[5] \n"
"fmla v30.4h, v22.4h, v6.h[6] \n"
"fmla v31.4h, v22.4h, v6.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4h, v23.4h, v7.h[0] \n"
"fmla v25.4h, v23.4h, v7.h[1] \n"
"fmla v26.4h, v23.4h, v7.h[2] \n"
"fmla v27.4h, v23.4h, v7.h[3] \n"
"fmla v28.4h, v23.4h, v7.h[4] \n"
"fmla v29.4h, v23.4h, v7.h[5] \n"
"fmla v30.4h, v23.4h, v7.h[6] \n"
"fmla v31.4h, v23.4h, v7.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"w"(_bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < size; i += 4)
{
__fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const __fp16* kptr = kernel.channel(p / 2 + p % 2);
int nn = inch; // inch always > 0
asm volatile(
"mov v24.16b, %8.16b \n"
"mov v25.16b, %8.16b \n"
"mov v26.16b, %8.16b \n"
"mov v27.16b, %8.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n"
"fmla v24.4h, v16.4h, v0.h[0] \n"
"fmla v25.4h, v16.4h, v0.h[1] \n"
"fmla v26.4h, v16.4h, v0.h[2] \n"
"fmla v27.4h, v16.4h, v0.h[3] \n"
"fmla v24.4h, v17.4h, v0.h[4] \n"
"fmla v25.4h, v17.4h, v0.h[5] \n"
"fmla v26.4h, v17.4h, v0.h[6] \n"
"fmla v27.4h, v17.4h, v0.h[7] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n"
"fmla v24.4h, v18.4h, v1.h[0] \n"
"fmla v25.4h, v18.4h, v1.h[1] \n"
"fmla v26.4h, v18.4h, v1.h[2] \n"
"fmla v27.4h, v18.4h, v1.h[3] \n"
"fmla v24.4h, v19.4h, v1.h[4] \n"
"fmla v25.4h, v19.4h, v1.h[5] \n"
"fmla v26.4h, v19.4h, v1.h[6] \n"
"fmla v27.4h, v19.4h, v1.h[7] \n"
"fmla v24.4h, v20.4h, v2.h[0] \n"
"fmla v25.4h, v20.4h, v2.h[1] \n"
"fmla v26.4h, v20.4h, v2.h[2] \n"
"fmla v27.4h, v20.4h, v2.h[3] \n"
"fmla v24.4h, v21.4h, v2.h[4] \n"
"fmla v25.4h, v21.4h, v2.h[5] \n"
"fmla v26.4h, v21.4h, v2.h[6] \n"
"fmla v27.4h, v21.4h, v2.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4h, v22.4h, v3.h[0] \n"
"fmla v25.4h, v22.4h, v3.h[1] \n"
"fmla v26.4h, v22.4h, v3.h[2] \n"
"fmla v27.4h, v22.4h, v3.h[3] \n"
"fmla v24.4h, v23.4h, v3.h[4] \n"
"fmla v25.4h, v23.4h, v3.h[5] \n"
"fmla v26.4h, v23.4h, v3.h[6] \n"
"fmla v27.4h, v23.4h, v3.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"w"(_bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
for (; i < size; i++)
{
__fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* kptr = kernel.channel(p / 2 + p % 2);
float16x4_t _sum0 = _bias0;
for (int q = 0; q < inch; q++)
{
float16x8_t _r0 = vld1q_f16(tmpptr);
float16x4_t _k0 = vld1_f16(kptr);
float16x4_t _k1 = vld1_f16(kptr + 4);
float16x4_t _k2 = vld1_f16(kptr + 8);
float16x4_t _k3 = vld1_f16(kptr + 12);
float16x4_t _k4 = vld1_f16(kptr + 16);
float16x4_t _k5 = vld1_f16(kptr + 20);
float16x4_t _k6 = vld1_f16(kptr + 24);
float16x4_t _k7 = vld1_f16(kptr + 28);
_sum0 = vfma_laneq_f16(_sum0, _k0, _r0, 0);
_sum0 = vfma_laneq_f16(_sum0, _k1, _r0, 1);
_sum0 = vfma_laneq_f16(_sum0, _k2, _r0, 2);
_sum0 = vfma_laneq_f16(_sum0, _k3, _r0, 3);
_sum0 = vfma_laneq_f16(_sum0, _k4, _r0, 4);
_sum0 = vfma_laneq_f16(_sum0, _k5, _r0, 5);
_sum0 = vfma_laneq_f16(_sum0, _k6, _r0, 6);
_sum0 = vfma_laneq_f16(_sum0, _k7, _r0, 7);
kptr += 32;
tmpptr += 8;
}
vst1_f16(outptr0, _sum0);
outptr0 += 4;
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const float bias0 = bias ? bias[p] : 0.f;
//
// __fp16* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// float sum = bias0;
//
// const __fp16* kptr = _kernel.channel(p);
//
// for (int q=0; q<inch; q++)
// {
// const __fp16* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
static void conv1x1s2_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 8;
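    // after reading 2 * outw pixels of a row (horizontal stride 2), skip the
    // rest of that row plus one full row (vertical stride 2); the factor 8 is
    // the elempack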
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const __fp16* r0 = bottom_blob.channel(p);
__fp16* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
float16x8_t _v0 = vld1q_f16(r0);
float16x8_t _v1 = vld1q_f16(r0 + 16);
float16x8_t _v2 = vld1q_f16(r0 + 32);
float16x8_t _v3 = vld1q_f16(r0 + 48);
vst1q_f16(outptr, _v0);
vst1q_f16(outptr + 8, _v1);
vst1q_f16(outptr + 16, _v2);
vst1q_f16(outptr + 24, _v3);
r0 += 64;
outptr += 32;
}
for (; j + 1 < outw; j += 2)
{
float16x8_t _v0 = vld1q_f16(r0);
float16x8_t _v1 = vld1q_f16(r0 + 16);
vst1q_f16(outptr, _v0);
vst1q_f16(outptr + 8, _v1);
r0 += 32;
outptr += 16;
}
for (; j < outw; j++)
{
float16x8_t _v = vld1q_f16(r0);
vst1q_f16(outptr, _v);
r0 += 16;
outptr += 8;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack8to4_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
ompnumthread.c | /*
* $PIP_license: <Simplified BSD License>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
* $
 * $RIKEN_copyright: Riken Center for Computational Science (R-CCS),
* System Software Development Team, 2016-2020
* $
* $PIP_TESTSUITE: Version 1.0.0$
*
* $Author: Atsushi Hori (R-CCS) mailto: ahori@riken.jp or ahori@me.com
* $
*/
#include <omp.h>
#include <stdio.h>
int nth;
int main() {
#pragma omp parallel
{
nth = omp_get_num_threads();
}
printf( "%d\n", nth );
if( !nth ) return 1;
return 0;
}
|
bks_fmt_plug.c | /*
* This software is Copyright (c) 2016, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_bks;
#elif FMT_REGISTERS_H
john_register_one(&fmt_bks);
#else
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "hmac_sha.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "twofish.h"
#include "sha.h"
#include "loader.h"
#include "simd-intrinsics.h"
#include "pkcs12.h"
#include "memdbg.h"
#define FORMAT_LABEL "BKS"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "PKCS12 PBE " SHA1_ALGORITHM_NAME
#define PLAINTEXT_LENGTH 31
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(ARCH_WORD_32)
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#if !defined(SIMD_COEF_32)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#else
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#endif
#define FORMAT_TAG "$bks$"
#define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define MAX_STORE_DATA_LENGTH 8192 // XXX ensure this is large enough
static struct fmt_tests tests[] = {
{"$bks$0$1$20$2036$20$a2c6157bea089967ccfa13670ae992a1265ab7b5$01001a636861726c65732070726f78792073736c2070726f7879696e6700000140737320ac000000000005582e353039000004623082045e30820346a003020102020101300d06092a864886f70d01010505003081913123302106035504030c1a436861726c65732050726f78792053534c2050726f7879696e6731243022060355040b0c1b687474703a2f2f636861726c657370726f78792e636f6d2f73736c3111300f060355040a0c08584b3732204c74643111300f06035504070c084175636b6c616e643111300f06035504080c084175636b6c616e64310b3009060355040613024e5a3020180f31383939313233313132303030305a170d3338303932343033313930355a3081913123302106035504030c1a436861726c65732050726f78792053534c2050726f7879696e6731243022060355040b0c1b687474703a2f2f636861726c657370726f78792e636f6d2f73736c3111300f060355040a0c08584b3732204c74643111300f06035504070c084175636b6c616e643111300f06035504080c084175636b6c616e64310b3009060355040613024e5a30820122300d06092a864886f70d01010105000382010f003082010a02820101008349587455efb272e397a31d3b52d9b13115c93f320766d2d451117f45c40285506027079ed439cabb94d44f1ae136eb1e79bf77abe43345ad1d436809cf9e035c439272f3ca917dcadd7fbd0e3929f1a345f0b89096130bbd116f8d3ab5655789b7b0831325bd22903f198da6bdda30c08dfd17ce9ab51c48555264307bcf789a2b6c48df4ecaf3ea2c092ee737ad8f397900ac03303bfe2ae43549030a7866cb6fe9b04b9f6ec498b4e7369e99b45491bf093858a77c72f8adc818e018d413265e39446be514f78eb57a23aa88f630776f861a9163e04ad38ee8a5c9219d0fc23f6b9a6324455dea6f4a6a251eca1fa3d6288cb89fd12a2062a3a015a56f250203010001a381bc3081b9300f0603551d130101ff040530030101ff307706096086480186f842010d046a136853534c2050726f7879696e6720697320656e61626c656420696e20436861726c65732050726f78792e20506c6561736520766973697420687474703a2f2f636861726c657370726f78792e636f6d2f73736c20666f72206d6f726520696e666f726d6174696f6e2e300e0603551d0f0101ff040403020204301d0603551d0e04160414bb27f4cb2eb6dbb058101bbd803f38d208d76129300d06092a864886f70d010105050003820101000041f935f30b209e56360f7e3d9c30314a213323c47edcea1467600a50ffe4e8e39dfca8c8d34463c34745ff04c870f1df28bb772db0cf1bca677b70842c742bc6d5fb00559ad643c6bf2c95bd0b855a961d7d6a3eada9c642e9a789474c4ad838c6f732d8d859548d30829df7a32d098fe3f00147daf08c0b37dd597184c1e27a61ea42050c73994e809013cb21e37bf84bf923bcefea6164fd28ab9058ccc48f1f486fc1c47ebd8a9c933f542401b11f36a003e47b141a41c7b326d18d023e11edb445699aa44800254ea33f174fd5eb1ccce6a09365751ff905988c06315b5575067bf65ec24cad1a6a601846d1d2f51f1f420a2762990b044000619d1c8400$3b798574df20a2be48edb0b0c687cce2cf5c293c", "secret"},
// https://github.com/doublereedkurt/pyjks/blob/master/tests/keystores/bks/christmas.bksv1
{"$bks$0$1$20$1730$20$a9e6ba49c14bd8fd2c973d48f0241a4208effcfd$020009706c61696e5f6b657900000154b6ca8fa5000000000200035241570003444553000000084cf2fe915d082a430400127365616c65645f707269766174655f6b657900000154b6ca8f6e000000010005582e3530390000019c3082019830820101a003020102020100300d06092a864886f70d01010b050030123110300e06035504030c0752534131303234301e170d3136303531353233343030385a170d3138303531353233343030385a30123110300e06035504030c075253413130323430819f300d06092a864886f70d010101050003818d0030818902818100b7201edbbf265bb253c299533704df2c990978c16e97a04b9556a6af1df11e60e5e138502fc1337879dfdd4461ede4f08b3303fd1b80befb1be09d9c3fcffc2c1caeb9c83d0142dac39d7c341bd4bc07b7fee23c162941f4b4fb221d9f93388cce2b21beffdd458be9244babf34e28f25ae620b4b883617bb5e9851364c0dd350203010001300d06092a864886f70d01010b05000381810097c7fb5997212b8ff7afa863be886d1d1a0947b7392d83a6304b60cbb76a9f7172095f123254aafdd315d933650993df354f82ac85d9467178ba8eda397149cf0df4dbba9d7bdbdffd83b710c2c8c8bf25ef4dda3d49cca820159eeb97b133c5f324219b4d1294d524a85d5e6b77e38f42814052f5134a938d29342b21bed1a4000002ac00000014ad6391270981b833ba68bbfa225f087ab7e345240000075cea5b86ca6a5e80e0b1fd06a745bcc2bef875e4746db35c59a00bbac4f398cf202fe97b60813848b21c7a36f3faccac560bd05506f5b44322ed519af34190bdf10905e81d8569c3c32db3238bfefee5328c21883d82c6b9a9e07cf975aa8559368b2f9212a7e01103c21c5136e6da0b0ecdd8bcba1e071f9f084d59349c18d4e6af418ab0ca9ce73fa4bd38e1bf84e809f001069f9c821d3ee44bb23182d229b782607bd47e68e7ff299ed1e28f7cb3cb03af5ef90711db5b306ec592a2bef7a5559a06290eaab19ab77a02caf3297e24bdc1aeb14d99d2e838863f355c738d91e4496e4f10f5a7bde22c0425524e164f198407ad99d5433fdbd6bd2adf50bbe2c909ae4e18effa5fb62059614aa646639fe4963f5e34d33b030e708fe5a816cb3d9596a0e394114f2622ce7694174e2399de1e04ae42022cceb2f5da0c273f7fc6f45bd7991b8e85df594a171a0fe64a73e1b9a0f57492eaaa35d5498c7ebbd28fcd23c12a006fc147cd5209168eadf40b53d4196066352e7cf562858c62dd746f43c48542d174857d03142a680b975948ebba31ad1010d1a7fbf8cbc3c0b16c376d8567212666aa01a420db3028695c289f0625e4d8fb872c358fb9e821c6346055d7c04ec545688cd011e9f60ba2e80959ec8fed703044ea7422d0ce4f7a401a05db9afc30b39c64b0e118599f39124df1c28298ee625a3c0095e0ecb14ae72dacc8106a52a258ea1e2554005bfa7c4e90e1a999a949e6bead7f333de2bb036b047c86cb1e6c8931d189b07647e500c04f8c4772fa630c328d60b0cb3a9e209ba0e574194dd96f4f4e6465273ae7c6c5d73eec505da065294803971584a60b2222bff62b36ad59cfcff893999ff484f2849186284303c1c2369445d466850ec7737d8313229af62576bfc2962284650400117365616c65645f7075626c69635f6b657900000154b6ca8f9500000000000000d400000014a39c93f59d151905a421db0646b31841d7ddb375000004e5faab6bfd6d6ddef0253fb9669a8ce679b583b4d8e18d42a62504e916c9647ff3b00eaad96e5410ab11bbf642e297c60954bdb065da4db0a69e0b2e6baa5ed8361939d4aec7599919d20cbaf05483655a4a5bb7fcc1b7ff33196a7df54779245d14f2f68636d1983d03c2af9ebbc78ed57116f58b019d810a8d9f03b45b4d56b2fbfeab0f7c8d506e1add5cd83fe06be0873cff5a198d213085df165dbf8d98371b9bf2d9ecc67a91f8ac731eecdbc7c46661fe4efb470da50100046365727400000154b6ca8f6e000000000005582e3530390000019c3082019830820101a003020102020100300d06092a864886f70d01010b050030123110300e06035504030c0752534131303234301e170d3136303531353233343030385a170d3138303531353233343030385a30123110300e06035504030c075253413130323430819f300d06092a864886f70d010101050003818d0030818902818100b7201edbbf265bb253c299533704df2c990978c16e97a04b9556a6af1df11e60e5e138502fc1337879dfdd4461ede4f08b3303fd1b80befb1be09d9c3fcffc2c1caeb9c83d0142dac39d7c341bd4bc07b7fee23c162941f4b4fb221d9f93388cce2b21beffdd458be9244babf34e28f25ae620b4b883617bb5e9851364c0dd350203010001300d06092a864886f70d0101
0b05000381810097c7fb5997212b8ff7afa863be886d1d1a0947b7392d83a6304b60cbb76a9f7172095f123254aafdd315d933650993df354f82ac85d9467178ba8eda397149cf0df4dbba9d7bdbdffd83b710c2c8c8bf25ef4dda3d49cca820159eeb97b133c5f324219b4d1294d524a85d5e6b77e38f42814052f5134a938d29342b21bed1a403000c73746f7265645f76616c756500000154b6ca8f9f0000000000000009020305070b0d1113170400117365616c65645f7365637265745f6b657900000154b6ca8f9a000000000000003c00000014b3cd07a06cc6354ffaae1a63a297e46ed8791ff800000437fc0a1b31876167b84e1d85b00dfdae0ee0ad42ad3cfdae41f8022e1a719f56eb00$fdf1915288bcaa30ad5192bcc327db290b1c21e0", "12345678"},
// https://github.com/doublereedkurt/pyjks/blob/master/tests/keystores/bks/christmas.bksv2
{"$bks$0$2$160$1141$20$de18c5bf26bbce0c7a3e6b9685f3028c3a58c5c2$020009706c61696e5f6b657900000154b6ca8fe3000000000200035241570003444553000000084cf2fe915d082a430400127365616c65645f707269766174655f6b657900000154b6ca8fbd000000010005582e3530390000019c3082019830820101a003020102020100300d06092a864886f70d01010b050030123110300e06035504030c0752534131303234301e170d3136303531353233343030385a170d3138303531353233343030385a30123110300e06035504030c075253413130323430819f300d06092a864886f70d010101050003818d0030818902818100b7201edbbf265bb253c299533704df2c990978c16e97a04b9556a6af1df11e60e5e138502fc1337879dfdd4461ede4f08b3303fd1b80befb1be09d9c3fcffc2c1caeb9c83d0142dac39d7c341bd4bc07b7fee23c162941f4b4fb221d9f93388cce2b21beffdd458be9244babf34e28f25ae620b4b883617bb5e9851364c0dd350203010001300d06092a864886f70d01010b05000381810097c7fb5997212b8ff7afa863be886d1d1a0947b7392d83a6304b60cbb76a9f7172095f123254aafdd315d933650993df354f82ac85d9467178ba8eda397149cf0df4dbba9d7bdbdffd83b710c2c8c8bf25ef4dda3d49cca820159eeb97b133c5f324219b4d1294d524a85d5e6b77e38f42814052f5134a938d29342b21bed1a4000002ac00000014755493361b0d7e3d4daed45a75199454cfe61585000005acae83870b92b606cc44facf5ce35598be2da231bf4ae80da2c2157f89e841d27e98416e569fca7842acf0542f6b4027682c565bcad8a169c4a641929f19be3935446cf41c5994bf8a5b3fef45b640dacd9396e501a957e5d8d0a93bdf308640378788b6c0be629a8b6bd8cfa874ee3d71998ff6dd550a7865e36b373981ca42a70c0228193b717a98ea0da7817cb6f8f7d4fc4061acb6a39eb4bce14b98a94388f28ddd87dfbc13ac925fb2c1dd7a907485a34d66832b72eb92cb8e60c09c17b3a95769a81e773c248256146f5c3c8f04ffb26a75a8314c9c3be058963c9907a52e2f05cc1ca4921ac0b34ec8f6a1b27013f57399c20c681ef3f0f6f5a2879a2af4a7e746d2ddfd0b33917abd005dc59225fda2a7d1fe026ce8014e85bfc8e23932b3ae6af53a6ebe52def1f89942ec899eff2e74324ce45329c02114003dfc26211fdc3a0f3a9045331c2b0fe20c0985e4e24725c959c19c87b1679f76c2aefe447413adddf992bdc143b023373c894ebed1e2106236ebbbc8bf6d71c5770c50aab7c38e054299391ccaba845b11f88f40e12d126fa94584621921355caf293e876b71ba9e74d7577a83737ca92e581454bd1f6ea271379846aca2dc032d53181d7b8f61f98cbc43215023b512b39176650db6dabb6d038f7890d1d730da57e2f4595e9edc6466cf1eca12fd35413834a957cc341181439de13971c67794d8fadf2bcb296cf8e1a2058957af2a194e32599626f86d4dae838f944c7d495c9a36c72190fdd2cf55bafd12134383832bb8c23597d0dcc87754f575e77ccc802ba0f3d7662f01db277d95bcddec55927af67753e07c0d4ee509465a122c6b0adb097d92158479d4836dc2e03b3a493d44dda8eb9895327216460d2926e122868af3f6f82db073af041b66668e2e16b882690400117365616c65645f7075626c69635f6b657900000154b6ca8fcc00000000000000d400000014921f89acc2d3f90372e5b61a04294b6efb34075100000434a69feb6389d6bb8b561139a8c877ae1ae87167a709008e55350abbdd8fa0a77dbe1609e2685fcae5c42825c7dc65ac0dc2fce4a4d8688b71eaaa094461cadce7c74d9a4bfb507d7fb4d31567d934f5e69829c12bdccc494876fd3fa12bde21c3c1525e05c580c87a4272212f0c3bce9fc9a32716ebfb50333563529a2bbc92a56a811c083135ed2fc38d4a203d3bc2fa3cddeac9e8d6cbc760beaedb8dd9312e8d30be0f6976f9ab7681c5902ab210c2d11202d9181a8d250100046365727400000154b6ca8fbd000000000005582e3530390000019c3082019830820101a003020102020100300d06092a864886f70d01010b050030123110300e06035504030c0752534131303234301e170d3136303531353233343030385a170d3138303531353233343030385a30123110300e06035504030c075253413130323430819f300d06092a864886f70d010101050003818d0030818902818100b7201edbbf265bb253c299533704df2c990978c16e97a04b9556a6af1df11e60e5e138502fc1337879dfdd4461ede4f08b3303fd1b80befb1be09d9c3fcffc2c1caeb9c83d0142dac39d7c341bd4bc07b7fee23c162941f4b4fb221d9f93388cce2b21beffdd458be9244babf34e28f25ae620b4b883617bb5e9851364c0dd350203010001300d06092a864886f70d010
10b05000381810097c7fb5997212b8ff7afa863be886d1d1a0947b7392d83a6304b60cbb76a9f7172095f123254aafdd315d933650993df354f82ac85d9467178ba8eda397149cf0df4dbba9d7bdbdffd83b710c2c8c8bf25ef4dda3d49cca820159eeb97b133c5f324219b4d1294d524a85d5e6b77e38f42814052f5134a938d29342b21bed1a403000c73746f7265645f76616c756500000154b6ca8fe30000000000000009020305070b0d1113170400117365616c65645f7365637265745f6b657900000154b6ca8fd1000000000000003c000000146c18877619eac9f77da0d86bd8a18639eb084f5f000004bd0c558b25c1657b8b9c25079e64196fe43e9fec0ef5d44b2d8a0dbd09c92a8b4e00$3b99d6fd87755af63606414be2b75b9cfa3751c7", "12345678"},
// christmas.uber
{"$bks$1$1$20$1141$20$fcc7b038c0ca3e1b99e0bc1192ed999a66129a2d$c561a20373785bfa46d530192cfe16c3edf9ccacc75e53d2c1c7bafd64c77d1aea9c52817d9a93224bf49cca1273de0856d32a82f4ce97b550abb98fd9f82297814784774396bea9fb3282fdb75b33ed59d52a5eefc2486b0726dc2156ca8257f41d033f8c41276ce78c2155b80eeb97d9a3d8e065a73bb9d5ab1840d60ea56cc04b00a87346d8a580829b9e437869b4a39626b1d17e169589de1e78e8cd6261dcd8b48a3ae52b89f90b2af60f395aff5cca0281c5b6f5b4dcdb8d9a30090e41dc033c0a0426d03c35ec1264c5ca1710c32d69fe6f222cd913f56392d3c4e3c80a0a6118bd4054a3f932728b9f855ac3ed45f1ec9209c2be6e807ec427e576781244df751f52f858c23a7985ed667ed739eee6151ad0d28f520406eb30a8c27da2fd5cdb471cd73e5c1f0c746527414a65efa39b336aeb43f03c556f01e4aec7f464313d4f238316ab229d854bc48e6dad068c9a56a00f2c188cea1baeea08420f1ee82789a089678b9bf134b95147c83f0962e5f45c96e5a9b43628c4df0f415885f857932d9344c409a25d8ff7918d228ddc25a5940f18f00f1a83b7ccbe520dca92eb9a360857857c46a70c0becfff0a66286488313835406e6e9e9053d1e139226e82471171f1748a0447b2efd015d87399118c548a048270c61034c832265bd9104be3ea7910c9e730c2d2ddfb5edc761634a388ae364c91b2662ce4e437e0f954cf14dee83e01dec7c7aeb8d0c63ed099e8ec28aa64b54368159ec819d69fb028554ad32af7318602b98fa2d0cfed206ae8973a6c305c80f8972f9b245808364d599c9cd6847ba2ee44e9fe07be2c2323ea28ff8d8965036849085d9947153730a367e955b67d195510c6a73993c403224bc5877bcf04be2b9c42c7fe3ea28c953e4c21278b3e12f21a541758f41b08689eba4985ad18e6e113f3abef6479df04aac104079ab8677cfeae33bc090d47f7f17c1386bdf099282fa079a48ce4f94d661f4ec762892faf680131f0372555f3436ac7b364eb64570d1b46e3d8ebd97236d01abe217c8a95ab0ba097a56f45f96c18b777343e7214748262f083a9fdcd4e4331e50d17dc58dfedfa459e87bd71be8a09283b25b1a0e6daba7d03ca2580c02edcae8e43f2d8ebdff37385bdcfa73f057d636c970278a01b7a1f02878e1961ef5a7fab4bd2e788bf356688f5b47d1573db600d91474c5a802ce27c789fd02df30ab719f8bcd7c58eb5bd45b9e20d1bb2f26c1f3cdd32247dd7268f56cb187734e0d977a5b2aef80622960156042b65448eee980f7d77d8b2519aa5f216a6300c8b534220feaf56ade0369e5082fc4dd623f134abef8b8253b4e7dc8a9a40188598b18512ba77d009c5fbdd2159fd8707dfea97b1e7e8c4297dd7499aaaa7c09097f057badc385e9de29a2b668529fc280f6cea1078d0d79834763a24ba38f19e4654dfe7cbb9ff08f122509433a3d9d055b864c8a9a88518fd93e50ee9c7f678aaf52c25f3f4858996add60e6c21204c1e22fbb02b9090578cc1f42861dc93f955d81f00decee8fe7405367ee986835b0be865084964226e47c77c764b823421383299e76fbcfd64c2ad085681110f772e4c2f526eab0d6a7a771814613f745230ac8bd1055c5612f17cb3b5c4459b809f082bb11bfdee8d1db48d0616e51ee77594ad92a417027665beb208888f5b030a022bf9850d69d18e883fcb4c47274db3708d5a5c59bc1e25842de6dce350d1c2c16d7f9c7103ed15c6f25508cc27199569d24bf7e55e960a0006e177470a1a3cc33f540324565c0a67d20a2c7b8389e8aa375e4b4515d451ce0dce43cf99a2a6f2f6adba5206c243a8aa2a974a2737923b61f8c3c86253fe896fd9c8ab9a66f17086dde38b539cb23a2d7e261299d8dc12639cdbd56a5813026a3670f9ac79346b302b00f1c3d41fd566d9e3a3ad2d756948f5f97e1967fcc1410a1078191fb89b51a137a70ae505dfa65a96b9e4d70f562b4f2c715c1f782fd79d02f98aead88f1eb5d8722b4fef4699218f003f99270b852a0f892f9f23827820900288a4a1fa5978b44bd3035de46f6aa8a3e32ade38190756658836c0fde5b9b34a16a8e97cdcbace80bb86d88dceaeb9a2b87ff2a02bd004bb1d4f08232b91b476b3ca0873ef132b0a70d939a11c4fe66bc2b113eba14c2e4728499441b6f26ec014968c95e71ff23d33c4267c67af21b393a770e03a8781b10159a20cfceab34dc592a394e11fd649cfe71eaf7ef7de863d1a6eff623a67b7cc94b114cc646df2380e4851c614eb54d59476419964063d9f85661c079719ba4d2ce5872820f18a34f384dd48fb20907c8ffa521ef3e6a4302524c442a2230f8b574a82e78f1e699d3b2621132bda6a3238e9b41bd773d3d476dfc11ef549683b789e286a54d4c8ea01a727f9bdbfd6310e8882b2ed26b75edc9958cabe3beed1cc167c7f2ab
759326b0e4026ac63e2489ea4da6814fbf547a6637fc1d0fc78f79c5517b7e329ea226c2d29281cac160a7709953ab1dcdb731e68b940106bee0d9d68720b66d2953ad3d2185596850dbb4cfc3654161fbfee8be7258b9a46a82e8c8c908fc93c235b9ae101b4e66c7b52434fe6c4147f035cd5d42a4a8b047d33d907e89018a994f343f241643ad1273ca8d486de956931e7f94c197460ec09c100fb27ef98d1e57e1cb736adb943a83193ce586aa5c61e5c0bd25f5fd846beb9cb8212643b752e0373f17a0061a5b350b4d06b405c5c0949b5733996eb67262893ecb4bd1213a04ad4c7900b441c103250a01b41075b70f13c26937b149e41c180f55b8d472273679032c1a16058d05e43fedcde7164527ca82b160cc9deb53675bbe0db70a47a5decc64f500d9dd7686240366$0000000000000000000000000000000000000000", "12345678"},
{NULL}
};
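/*
 * Ciphertext layout, as parsed by valid() and get_salt() below (fields
 * separated by '$' after the format tag):
 *
 * $bks$format$version$hmac_key_size$iteration_count$saltlen$salt$store_data$store_hmac
 *
 * format: 0 -> BKS keystore, 1 -> UBER keystore; version: 1 or 2; salt,
 * store_data and store_hmac are lower-case hex. For UBER keystores the
 * trailing field is unused (all zeros in the test vector above) and
 * verification goes through Twofish decryption instead.
 */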
#ifdef _MSC_VER
#define custom_salt bks_custom_salt
#define cur_salt bks_cur_salt
#endif
static struct custom_salt {
int format; // 0 -> BKS keystore, 1 -> UBER keystore
int version; // BKS version
int hmac_key_size;
int iteration_count;
int saltlen;
unsigned char salt[20];
int store_data_length;
unsigned char store_data[MAX_STORE_DATA_LENGTH];
unsigned char store_hmac[20];
} *cur_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static size_t *saved_len;
static int *cracked, any_cracked; // "cracked array" approach is required for UBER keystores
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
static int omp_t = 1;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
cracked = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*cracked));
Twofish_initialise();
}
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(saved_len);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p = ciphertext, *ctcopy, *keeptr;
int format, version, saltlen, extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LENGTH;
if ((p = strtokm(ctcopy, "$")) == NULL) // format
goto bail;
if (!isdec(p))
goto bail;
format = atoi(p);
if (format != 0 && format != 1) // 0 -> BKS keystore, 1 -> UBER keystore
goto bail;
if ((p = strtokm(NULL, "$")) == NULL) // version
goto bail;
if (!isdec(p))
goto bail;
version = atoi(p);
if (version != 1 && version != 2) // 1 -> BKS-V1, 2 -> BKS-V2
goto bail;
if ((p = strtokm(NULL, "$")) == NULL) // hmac_key_size
goto bail;
if (!isdec(p))
goto bail;
if ((p = strtokm(NULL, "$")) == NULL) // iteration_count
goto bail;
if (!isdec(p))
goto bail;
if ((p = strtokm(NULL, "$")) == NULL) // saltlen
goto bail;
if (!isdec(p))
goto bail;
saltlen = atoi(p);
if (saltlen > 20)
goto bail;
if ((p = strtokm(NULL, "$")) == NULL) // salt
goto bail;
if (hexlenl(p, &extra) > saltlen * 2 || extra)
goto bail;
if (!ishexlc(p))
goto bail;
if ((p = strtokm(NULL, "$")) == NULL) // store_data
goto bail;
if (hexlenl(p, &extra) > MAX_STORE_DATA_LENGTH * 2 || extra)
goto bail;
if (!ishexlc(p))
goto bail;
if ((p = strtokm(NULL, "$")) == NULL) // store_hmac
goto bail;
if (hexlenl(p, &extra) != 20*2 || extra)
goto bail;
if (!ishexlc(p))
goto bail;
p = strrchr(ciphertext, '$');
if (!p)
goto bail;
p = p + 1;
if (!ishexlc(p))
goto bail;
MEM_FREE(keeptr);
return 1;
bail:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
int i;
char *p = ciphertext, *ctcopy, *keeptr;
memset(&cs, 0, sizeof(cs));
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LENGTH;
p = strtokm(ctcopy, "$");
cs.format = atoi(p);
p = strtokm(NULL, "$");
cs.version = atoi(p);
p = strtokm(NULL, "$");
cs.hmac_key_size = atoi(p);
p = strtokm(NULL, "$");
cs.iteration_count = atoi(p);
p = strtokm(NULL, "$");
cs.saltlen = atoi(p);
p = strtokm(NULL, "$");
for(i = 0; i < cs.saltlen; i++)
cs.salt[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) | atoi16[ARCH_INDEX(p[2*i+1])];
p = strtokm(NULL, "$");
cs.store_data_length = hexlenl(p, 0) / 2;
for(i = 0; i < cs.store_data_length; i++)
cs.store_data[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) | atoi16[ARCH_INDEX(p[2*i+1])];
p = strtokm(NULL, "$");
if (cs.format == 0) { // BKS keystore
for(i = 0; i < 20; i++)
cs.store_hmac[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) | atoi16[ARCH_INDEX(p[2*i+1])];
}
MEM_FREE(keeptr);
return (void *)&cs;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
int index;
const int count = *pcount;
if (any_cracked) {
memset(cracked, 0, sizeof(*cracked) * count);
any_cracked = 0;
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
#if !defined(SIMD_COEF_32)
if (cur_salt->format == 0) {
unsigned char mackey[20];
int mackeylen = cur_salt->hmac_key_size / 8;
// mackeylen is only 2 bytes, which results in a lot
// of collisions (which work just fine)
//
// FMT_NOT_EXACT can be turned on for BKS keystores
// for finding more possible passwords
unsigned char store_hmac_calculated[20];
pkcs12_pbe_derive_key(1, cur_salt->iteration_count,
MBEDTLS_PKCS12_DERIVE_MAC_KEY,
(unsigned char*)saved_key[index],
saved_len[index], cur_salt->salt,
cur_salt->saltlen, mackey, mackeylen);
hmac_sha1(mackey, mackeylen, cur_salt->store_data,
cur_salt->store_data_length,
store_hmac_calculated, 20);
if (!memcmp(store_hmac_calculated, cur_salt->store_hmac, 20))
{
cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
} else if (cur_salt->format == 1) {
unsigned char computed_checksum[20];
unsigned char iv[16];
unsigned char key[32];
Twofish_key tkey;
int datalen = 0;
unsigned char store_data_decrypted[MAX_STORE_DATA_LENGTH];
SHA_CTX ctx;
pkcs12_pbe_derive_key(1, cur_salt->iteration_count,
MBEDTLS_PKCS12_DERIVE_IV,
(unsigned char*)saved_key[index],
saved_len[index], cur_salt->salt,
cur_salt->saltlen, iv, 16);
pkcs12_pbe_derive_key(1, cur_salt->iteration_count,
MBEDTLS_PKCS12_DERIVE_KEY,
(unsigned char*)saved_key[index],
saved_len[index], cur_salt->salt,
cur_salt->saltlen, key, 32);
Twofish_prepare_key(key, 32, &tkey);
datalen = Twofish_Decrypt(&tkey, cur_salt->store_data, store_data_decrypted, cur_salt->store_data_length, iv);
if (datalen < 0)
continue;
SHA1_Init(&ctx);
SHA1_Update(&ctx, store_data_decrypted, datalen - 20);
SHA1_Final(computed_checksum, &ctx);
if (!memcmp(computed_checksum, store_data_decrypted + datalen - 20, 20))
{
cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
#else
size_t lens[SSE_GROUP_SZ_SHA1], j;
const unsigned char *keys[SSE_GROUP_SZ_SHA1];
// Load keys, and lengths
for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {
lens[j] = saved_len[index+j];
keys[j] = (const unsigned char*)(saved_key[index+j]);
}
if (cur_salt->format == 0) {
unsigned char *mackey[SSE_GROUP_SZ_SHA1], real_keys[SSE_GROUP_SZ_SHA1][20];
int mackeylen = cur_salt->hmac_key_size / 8;
// mackeylen is only 2 bytes, which results in a lot
// of collisions (which work just fine)
//
// FMT_NOT_EXACT can be turned on for BKS keystores
// for finding more possible passwords
unsigned char store_hmac_calculated[20];
for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j)
mackey[j] = real_keys[j];
pkcs12_pbe_derive_key_simd(1, cur_salt->iteration_count,
MBEDTLS_PKCS12_DERIVE_MAC_KEY,
keys, lens, cur_salt->salt,
cur_salt->saltlen, mackey, mackeylen);
for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {
hmac_sha1(mackey[j], mackeylen, cur_salt->store_data,
cur_salt->store_data_length,
store_hmac_calculated, 20);
if (!memcmp(store_hmac_calculated, cur_salt->store_hmac, 20))
{
cracked[index+j] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
} else if (cur_salt->format == 1) {
unsigned char iv_[SSE_GROUP_SZ_SHA1][16], *iv[SSE_GROUP_SZ_SHA1];
unsigned char ckey_[SSE_GROUP_SZ_SHA1][32], *ckey[SSE_GROUP_SZ_SHA1];
Twofish_key tkey;
int datalen = 0;
unsigned char store_data_decrypted[MAX_STORE_DATA_LENGTH];
SHA_CTX ctx;
for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {
iv[j] = iv_[j];
ckey[j] = ckey_[j];
}
pkcs12_pbe_derive_key_simd(1, cur_salt->iteration_count,
MBEDTLS_PKCS12_DERIVE_IV,
keys,
lens, cur_salt->salt,
cur_salt->saltlen, iv, 16);
// lens[] gets clobbered by the previous derive call, so reload the lengths before deriving the key.
for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j)
lens[j] = saved_len[index+j];
pkcs12_pbe_derive_key_simd(1, cur_salt->iteration_count,
MBEDTLS_PKCS12_DERIVE_KEY,
keys,
lens, cur_salt->salt,
cur_salt->saltlen, ckey, 32);
for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {
unsigned char computed_checksum[20];
Twofish_prepare_key(ckey[j], 32, &tkey);
datalen = Twofish_Decrypt(&tkey, cur_salt->store_data, store_data_decrypted, cur_salt->store_data_length, iv[j]);
if (datalen < 0)
continue;
SHA1_Init(&ctx);
SHA1_Update(&ctx, store_data_decrypted, datalen - 20);
SHA1_Final(computed_checksum, &ctx);
if (!memcmp(computed_checksum, store_data_decrypted + datalen - 20, 20))
{
cracked[index+j] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
}
#endif
}
return count;
}
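/*
 * Verification summary: both paths derive key material from the candidate
 * password with the SHA-1 based PKCS#12 PBE (pkcs12_pbe_derive_key).
 * BKS (format 0): derive a MAC key, HMAC-SHA1 the raw store_data and
 * compare the result against store_hmac.
 * UBER (format 1): derive an IV and a 256-bit Twofish key, decrypt
 * store_data with the derived IV, then check that the SHA-1 of the
 * plaintext matches its last 20 bytes.
 */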
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void set_key(char *key, int index)
{
saved_len[index] =
strnzcpyn(saved_key[index], key, sizeof(saved_key[index]));
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_bks = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
noatomic.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main() {
float *x,*y,*work1,*work2;
float sum;
int *index;
int n,i;
n=1000;
x=(float*)malloc(n*sizeof(float));
y=(float*)malloc(n*sizeof(float));
work1=(float*)malloc(n*sizeof(float));
work2=(float*)malloc(n*sizeof(float));
index=(int*)malloc(n*sizeof(int));
srand((unsigned) n);
for( i=0;i < n;i++) {
// index[i]=(n-i)-1;
index[i]=(rand() % (n/10));
x[i]=0.0;
y[i]=0.0;
work1[i]=i;
work2[i]=i*i;
}
sum=0;
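/* The loop below is intentionally racy: index[] contains duplicates
(rand() % (n/10)), so concurrent updates to x[index[i]] collide, and sum
is a shared read-modify-write accumulator. The commented-out atomics mark
the two statements that need synchronization; y[i] is safe because each
iteration touches a distinct element. */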
#pragma omp parallel for shared(x,y,index,n,sum)
for( i=0;i< n;i++) {
//#pragma omp atomic
x[index[i]] += work1[i];
//#pragma omp atomic
sum+= work1[i];
y[i] += work2[i];
}
for( i=0;i < n;i++)
printf("%d %g %g\n",i,x[i],y[i]);
printf("sum %g\n",sum);
}
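/* A race-free variant of the loop above (a sketch: atomics fix the
indirect histogram update and a reduction handles the shared sum):

#pragma omp parallel for shared(x,y,index,work1,work2,n) reduction(+:sum)
for( i=0;i< n;i++) {
#pragma omp atomic
x[index[i]] += work1[i];
sum+= work1[i];
y[i] += work2[i];
}
*/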
|
SpatialAveragePooling.c | #include <math.h>
#include "../thnets.h"
int nnload_SpatialAveragePooling(struct module *mod, struct nnmodule *n)
{
struct table *t = n->table;
mod->type = MT_SpatialAveragePooling;
mod->updateOutput = nn_SpatialAveragePooling_updateOutput;
struct SpatialAveragePooling *m = &mod->SpatialAveragePooling;
m->padW = TableGetNumber(t, "padW");
m->padH = TableGetNumber(t, "padH");
m->dW = TableGetNumber(t, "dW");
m->dH = TableGetNumber(t, "dH");
m->kW = TableGetNumber(t, "kW");
m->kH = TableGetNumber(t, "kH");
m->ceil_mode = TableGetNumber(t, "ceil_mode");
m->count_include_pad= TableGetNumber(t, "count_include_pad");
return 0;
}
void pyload_SpatialAveragePooling(struct pyfunction *f)
{
struct SpatialAveragePooling *p = &f->module.SpatialAveragePooling;
f->module.updateOutput = nn_SpatialAveragePooling_updateOutput;
f->module.type = MT_SpatialAveragePooling;
struct pyelement *el;
if( (el = findelement(f->params, "padding", 0)) && el->type == ELTYPE_INTVECT)
{
p->padH = el->ivect[0];
p->padW = el->ivect[1];
}
if( (el = findelement(f->params, "stride", 0)) && el->type == ELTYPE_INTVECT)
{
p->dH = el->ivect[0];
p->dW = el->ivect[1];
}
if( (el = findelement(f->params, "kernel_size", 0)) && el->type == ELTYPE_INTVECT)
{
p->kH = el->ivect[0];
p->kW = el->ivect[1];
}
if( (el = findelement(f->params, "ceil_mode", 0)) && el->type == ELTYPE_INT)
p->ceil_mode = el->ivalue;
}
#ifdef ONNX
void onnxload_SpatialAveragePooling(const void *graph, struct module *m, int nodeidx)
{
int naxes = onnx_getint(graph, nodeidx, "axes", -2);
if( !(naxes == 0 || (naxes == 2 && onnx_getint(graph, nodeidx, "axes", 0) == 2 && onnx_getint(graph, nodeidx, "axes", 1) == 3)))
THError("ReduceMean along channel is not supported\n");
m->updateOutput = nn_SpatialAveragePooling_updateOutput;
m->type = MT_SpatialAveragePooling;
struct SpatialAveragePooling *p = &m->SpatialAveragePooling;
p->kH = onnx_getint(graph, nodeidx, "kernel_shape", 0);
p->kW = onnx_getint(graph, nodeidx, "kernel_shape", 1);
p->padH = onnx_getint(graph, nodeidx, "pads", 0);
p->padW = onnx_getint(graph, nodeidx, "pads", 1);
p->padH2 = onnx_getint(graph, nodeidx, "pads", 2);
p->padW2 = onnx_getint(graph, nodeidx, "pads", 3);
p->dH = onnx_getint(graph, nodeidx, "strides", 0);
p->dW = onnx_getint(graph, nodeidx, "strides", 1);
if(p->dH == 0)
p->dH = 1;
if(p->dW == 0)
p->dW = 1;
p->ceil_mode = 0;
}
#endif
THFloatTensor *nn_SpatialAveragePooling_updateOutput(struct module *module, THFloatTensor *input)
{
int kW = module->SpatialAveragePooling.kW;
int kH = module->SpatialAveragePooling.kH;
int dW = module->SpatialAveragePooling.dW;
int dH = module->SpatialAveragePooling.dH;
int padW = module->SpatialAveragePooling.padW;
int padH = module->SpatialAveragePooling.padH;
int ceil_mode = module->SpatialAveragePooling.ceil_mode;
int count_include_pad = module->SpatialAveragePooling.count_include_pad;
THFloatTensor *output = module->output;
float *output_data;
float *input_data;
int dimw = 2;
int dimh = 1;
int dimc = 0;
long nbatch = 1;
long inputWidth;
long inputHeight;
long outputWidth;
long outputHeight;
long nInputPlane; // number of channels (or colors)
long k;
if(! (input->nDimension == 3 || input->nDimension == 4) )
THError("3D or 4D (batch mode) tensor expected");
if(! (kW/2 >= padW && kH/2 >= padH) )
THError("pad should be smaller than half of kernel size");
if (input->nDimension == 4)
{
nbatch = input->size[0];
dimw++;
dimh++;
dimc++;
}
inputWidth = input->size[dimw];
inputHeight = input->size[dimh];
nInputPlane = input->size[dimc];
if(kW == 0)
kW = inputWidth;
if(kH == 0)
kH = inputHeight;
if(ceil_mode)
{
outputWidth = (long)(ceil((float)(inputWidth - kW + 2*padW) / dW)) + 1;
outputHeight = (long)(ceil((float)(inputHeight - kH + 2*padH) / dH)) + 1;
}
else
{
outputWidth = (long)(floor((float)(inputWidth - kW + 2*padW) / dW)) + 1;
outputHeight = (long)(floor((float)(inputHeight - kH + 2*padH) / dH)) + 1;
}
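/* Worked example (floor mode): a 224x224 input with kW = kH = 7,
padW = padH = 3 and dW = dH = 2 gives
floor((224 - 7 + 2*3) / 2) + 1 = 112 along each dimension. */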
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((outputHeight - 1)*dH >= inputHeight + padH)
--outputHeight;
if ((outputWidth - 1)*dW >= inputWidth + padW)
--outputWidth;
}
if( !(inputWidth >= kW - 2 * padW && inputHeight >= kH - 2 * padH) )
THError("input image smaller than kernel size");
if (input->nDimension == 3)
THFloatTensor_resize3d(output, nInputPlane, outputHeight, outputWidth);
else
THFloatTensor_resize4d(output, input->size[0], nInputPlane, outputHeight, outputWidth);
THFloatTensor *input2 = THFloatTensor_new();
THFloatTensor_resizeAs(input2, input);
THFloatTensor_copy(input2, input);
input = input2;
input_data = THFloatTensor_data(input);
output_data = THFloatTensor_data(output);
#pragma omp parallel for private(k)
for(k = 0; k < nInputPlane; k++)
{
long p;
for(p = 0; p < nbatch; p++)
{
long xx, yy;
/* For all output pixels... */
float *ptr_output = output_data + p*nInputPlane*outputWidth*outputHeight + k*outputWidth*outputHeight;
float *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
long i;
for(i = 0; i < outputWidth*outputHeight; i++)
ptr_output[i] = 0;
for(yy = 0; yy < outputHeight; yy++)
{
for(xx = 0; xx < outputWidth; xx++)
{
/* Compute the mean of the input image... */
long hstart = yy * dH - padH;
long wstart = xx * dW - padW;
long hend = fminf(hstart + kH, inputHeight + padH);
long wend = fminf(wstart + kW, inputWidth + padW);
long pool_size = (hend - hstart) * (wend - wstart);
hstart = fmaxf(hstart, 0);
wstart = fmaxf(wstart, 0);
hend = fminf(hend, inputHeight);
wend = fminf(wend, inputWidth);
float sum = 0;
long divide_factor;
if(count_include_pad)
divide_factor = pool_size;
else
divide_factor = (hend - hstart) * (wend - wstart);
long kx, ky;
for(ky = hstart; ky < hend; ky++)
{
for(kx = wstart; kx < wend; kx++)
sum += ptr_input[ky*inputWidth + kx];
}
/* Update output */
*ptr_output++ += sum/divide_factor;
}
}
}
}
THFloatTensor_free(input);
return output;
}
|
for-3.c | void bar (int);
int a[256];
void
foo (int j)
{
int i;
#pragma omp for
for (i = 0; i != 64; i = i + 4) /* { dg-error "increment is not constant 1 or -1" } */
bar (i);
#pragma omp for
for (i = 128; i != 64; i = i - 4) /* { dg-error "increment is not constant 1 or -1" } */
bar (i);
#pragma omp for
for (i = 0; i != 64; i = j + i) /* { dg-error "increment is not constant 1 or -1" } */
bar (i);
#pragma omp for
for (i = 128; i != 64; i = -16 + i) /* { dg-error "increment is not constant 1 or -1" } */
bar (i);
#pragma omp for
for (i = 0; i != 64; i += j) /* { dg-error "increment is not constant 1 or -1" } */
bar (i);
#pragma omp for
for (i = 128; i != 64; i -= 8) /* { dg-error "increment is not constant 1 or -1" } */
bar (i);
#pragma omp single
{
#pragma omp simd
for (i = 0; i != 64; i = i + 16) /* { dg-error "increment is not constant 1 or -1" } */
a[i] = a[i] + 1;
#pragma omp simd
for (i = 128; i != 64; i = i - 2) /* { dg-error "increment is not constant 1 or -1" } */
a[i] = a[i] + 1;
#pragma omp simd
for (i = 0; i != 64; i = j + i) /* { dg-error "increment is not constant 1 or -1" } */
a[i] = a[i] + 1;
#pragma omp simd
for (i = 128; i != 64; i = -j + i) /* { dg-error "increment is not constant 1 or -1" } */
a[i] = a[i] + 1;
#pragma omp simd
for (i = 0; i != 64; i += 8) /* { dg-error "increment is not constant 1 or -1" } */
a[i] = a[i] + 1;
#pragma omp simd
for (i = 128; i != 64; i -= j) /* { dg-error "increment is not constant 1 or -1" } */
a[i] = a[i] + 1;
}
}
|
8580.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "covariance.h"
/* Array initialization. */
static
void init_array (int m, int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i, j;
*float_n = 1.2;
for (i = 0; i < M; i++)
for (j = 0; j < N; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int i, j;
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_covariance(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m))
{
int i, j, j1, j2;
#pragma scop
/* Determine mean of column vectors of input data matrix */
{
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Center the column vectors. */
for (i = 0; i < _PB_N; i++)
{
#pragma omp target teams distribute parallel for schedule(dynamic, 14)
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
}
}
/* Calculate the m * m covariance matrix. */
for (j1 = 0; j1 < _PB_M; j1++)
{
#pragma omp target teams distribute parallel for schedule(dynamic, 14)
for (j2 = j1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += data[i][j1] * data[i][j2];
symmat[j2][j1] = symmat[j1][j2];
}
}
}
#pragma endscop
}
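/* Note: after centering, the kernel stores
symmat[j1][j2] = sum_i data[i][j1] * data[i][j2],
i.e. the unnormalized scatter matrix; a textbook sample covariance would
additionally divide each entry by (N - 1), a step this benchmark omits. */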
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_covariance (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
return 0;
}
|
shear.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS H H EEEEE AAA RRRR %
% SS H H E A A R R %
% SSS HHHHH EEE AAAAA RRRR %
% SS H H E A A R R %
% SSSSS H H EEEEE A A R R %
% %
% %
% MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The XShearImage() and YShearImage() methods are based on the paper "A Fast
% Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics
% Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/shear.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C r o p T o F i t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropToFitImage() crops the sheared image as determined by the bounding box
% as defined by width and height and shearing angles.
%
% The format of the CropToFitImage method is:
%
% MagickBooleanType CropToFitImage(Image **image,
% const double x_shear,const double y_shear,
% const double width,const double height,
% const MagickBooleanType rotate,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CropToFitImage(Image **image,
const double x_shear,const double y_shear,
const double width,const double height,
const MagickBooleanType rotate,ExceptionInfo *exception)
{
Image
*crop_image;
PointInfo
extent[4],
min,
max;
RectangleInfo
geometry,
page;
register ssize_t
i;
/*
Calculate the rotated image size.
*/
extent[0].x=(double) (-width/2.0);
extent[0].y=(double) (-height/2.0);
extent[1].x=(double) width/2.0;
extent[1].y=(double) (-height/2.0);
extent[2].x=(double) (-width/2.0);
extent[2].y=(double) height/2.0;
extent[3].x=(double) width/2.0;
extent[3].y=(double) height/2.0;
for (i=0; i < 4; i++)
{
extent[i].x+=x_shear*extent[i].y;
extent[i].y+=y_shear*extent[i].x;
if (rotate != MagickFalse)
extent[i].x+=x_shear*extent[i].y;
extent[i].x+=(double) (*image)->columns/2.0;
extent[i].y+=(double) (*image)->rows/2.0;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
geometry.x=(ssize_t) ceil(min.x-0.5);
geometry.y=(ssize_t) ceil(min.y-0.5);
geometry.width=(size_t) floor(max.x-min.x+0.5);
geometry.height=(size_t) floor(max.y-min.y+0.5);
page=(*image)->page;
(void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
crop_image=CropImage(*image,&geometry,exception);
if (crop_image == (Image *) NULL)
return(MagickFalse);
crop_image->page=page;
*image=DestroyImage(*image);
*image=crop_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s k e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeskewImage() removes skew from the image. Skew is an artifact that
% occurs in scanned images because of the camera being misaligned,
% imperfections in the scanning or surface, or simply because the paper was
% not placed completely flat when scanned.
%
% The result will be auto-cropped if the artifact "deskew:auto-crop" is
% defined, while the amount the image is to be deskewed, in degrees, is
% also saved as the artifact "deskew:angle".
%
% The format of the DeskewImage method is:
%
% Image *DeskewImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: separate background from foreground.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void RadonProjection(const Image *image,MatrixInfo *source_matrixs,
MatrixInfo *destination_matrixs,const ssize_t sign,size_t *projection)
{
MatrixInfo
*swap;
register MatrixInfo
*p,
*q;
register ssize_t
x;
size_t
step;
p=source_matrixs;
q=destination_matrixs;
for (step=1; step < GetMatrixColumns(p); step*=2)
{
for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
{
register ssize_t
i;
ssize_t
y;
unsigned short
element,
neighbor;
for (i=0; i < (ssize_t) step; i++)
{
for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
continue;
}
for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
continue;
}
for ( ; y < (ssize_t) GetMatrixRows(p); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
continue;
}
}
}
swap=p;
p=q;
q=swap;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
{
register ssize_t
y;
size_t
sum;
sum=0;
for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
{
ssize_t
delta;
unsigned short
element,
neighbor;
if (GetMatrixElement(p,x,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
continue;
delta=(ssize_t) element-(ssize_t) neighbor;
sum+=delta*delta;
}
projection[GetMatrixColumns(p)+sign*x-1]=sum;
}
}
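/*
The projection[] entries accumulate the sum of squared differences of
vertically adjacent accumulator values, so the response is largest when
rows of foreground pixels line up with the sampling direction;
DeskewImage() below takes the offset of the maximum response as the
skew estimate.
*/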
static MagickBooleanType RadonTransform(const Image *image,
const double threshold,size_t *projection,ExceptionInfo *exception)
{
CacheView
*image_view;
MatrixInfo
*destination_matrixs,
*source_matrixs;
MagickBooleanType
status;
size_t
count,
width;
ssize_t
j,
y;
unsigned char
c;
unsigned short
bits[256];
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
source_matrixs=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
exception);
destination_matrixs=AcquireMatrixInfo(width,image->rows,
sizeof(unsigned short),exception);
if ((source_matrixs == (MatrixInfo *) NULL) ||
(destination_matrixs == (MatrixInfo *) NULL))
{
if (destination_matrixs != (MatrixInfo *) NULL)
destination_matrixs=DestroyMatrixInfo(destination_matrixs);
if (source_matrixs != (MatrixInfo *) NULL)
source_matrixs=DestroyMatrixInfo(source_matrixs);
return(MagickFalse);
}
if (NullMatrix(source_matrixs) == MagickFalse)
{
destination_matrixs=DestroyMatrixInfo(destination_matrixs);
source_matrixs=DestroyMatrixInfo(source_matrixs);
return(MagickFalse);
}
for (j=0; j < 256; j++)
{
c=(unsigned char) j;
for (count=0; c != 0; c>>=1)
count+=c & 0x01;
bits[j]=(unsigned short) count;
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
unsigned short
value;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=(ssize_t) (image->columns+7)/8;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
((MagickRealType) GetPixelGreen(image,p) < threshold) ||
((MagickRealType) GetPixelBlue(image,p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
value=bits[byte];
(void) SetMatrixElement(source_matrixs,--i,y,&value);
bit=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (bit != 0)
{
byte<<=(8-bit);
value=bits[byte];
(void) SetMatrixElement(source_matrixs,--i,y,&value);
}
}
RadonProjection(image,source_matrixs,destination_matrixs,-1,projection);
(void) NullMatrix(source_matrixs);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
unsigned short
value;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
((MagickRealType) GetPixelGreen(image,p) < threshold) ||
((MagickRealType) GetPixelBlue(image,p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
value=bits[byte];
(void) SetMatrixElement(source_matrixs,i++,y,&value);
bit=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (bit != 0)
{
byte<<=(8-bit);
value=bits[byte];
(void) SetMatrixElement(source_matrixs,i++,y,&value);
}
}
RadonProjection(image,source_matrixs,destination_matrixs,1,projection);
image_view=DestroyCacheView(image_view);
destination_matrixs=DestroyMatrixInfo(destination_matrixs);
source_matrixs=DestroyMatrixInfo(source_matrixs);
return(MagickTrue);
}
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
ExceptionInfo *exception)
{
CacheView
*image_view;
PixelInfo
background;
double
count;
ssize_t
y;
/*
Compute average background color.
*/
if (offset <= 0)
return;
GetPixelInfo(image,&background);
count=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
continue;
background.red+=QuantumScale*GetPixelRed(image,p);
background.green+=QuantumScale*GetPixelGreen(image,p);
background.blue+=QuantumScale*GetPixelBlue(image,p);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
background.alpha+=QuantumScale*GetPixelAlpha(image,p);
count++;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
image->background_color.red=(double) ClampToQuantum(QuantumRange*
background.red/count);
image->background_color.green=(double) ClampToQuantum(QuantumRange*
background.green/count);
image->background_color.blue=(double) ClampToQuantum(QuantumRange*
background.blue/count);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->background_color.alpha=(double) ClampToQuantum(QuantumRange*
background.alpha/count);
}
MagickExport Image *DeskewImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
AffineMatrix
affine_matrix;
const char
*artifact;
double
degrees;
Image
*clone_image,
*crop_image,
*deskew_image,
*median_image;
MagickBooleanType
status;
RectangleInfo
geometry;
register ssize_t
i;
size_t
max_projection,
*projection,
width;
ssize_t
skew;
/*
Compute deskew angle.
*/
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
sizeof(*projection));
if (projection == (size_t *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
status=RadonTransform(image,threshold,projection,exception);
if (status == MagickFalse)
{
projection=(size_t *) RelinquishMagickMemory(projection);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
max_projection=0;
skew=0;
for (i=0; i < (ssize_t) (2*width-1); i++)
{
if (projection[i] > max_projection)
{
skew=i-(ssize_t) width+1;
max_projection=projection[i];
}
}
projection=(size_t *) RelinquishMagickMemory(projection);
degrees=RadiansToDegrees(-atan((double) skew/width/8));
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" Deskew angle: %g",degrees);
/*
Deskew image.
*/
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
{
char
angle[MagickPathExtent];
(void) FormatLocaleString(angle,MagickPathExtent,"%.20g",degrees);
(void) SetImageArtifact(clone_image,"deskew:angle",angle);
}
(void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod,
exception);
affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.tx=0.0;
affine_matrix.ty=0.0;
artifact=GetImageArtifact(image,"deskew:auto-crop");
if (IsStringTrue(artifact) == MagickFalse)
{
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
return(deskew_image);
}
/*
Auto-crop image.
*/
GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
exception);
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
if (deskew_image == (Image *) NULL)
return((Image *) NULL);
median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
if (median_image == (Image *) NULL)
{
deskew_image=DestroyImage(deskew_image);
return((Image *) NULL);
}
geometry=GetImageBoundingBox(median_image,exception);
median_image=DestroyImage(median_image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: "
"%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
geometry.height,(double) geometry.x,(double) geometry.y);
crop_image=CropImage(deskew_image,&geometry,exception);
deskew_image=DestroyImage(deskew_image);
return(crop_image);
}
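/*
Usage sketch (hypothetical caller; a threshold of 40% of QuantumRange is
the value commonly suggested for deskewing scanned text):

ExceptionInfo *exception = AcquireExceptionInfo();
Image *deskew_image = DeskewImage(image, 0.40*(double) QuantumRange, exception);
if (deskew_image != (Image *) NULL)
deskew_image = DestroyImage(deskew_image);
exception = DestroyExceptionInfo(exception);
*/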
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e g r a l R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IntegralRotateImage() rotates the image an integral of 90 degrees. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the rotated image.
%
% The format of the IntegralRotateImage method is:
%
% Image *IntegralRotateImage(const Image *image,size_t rotations,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o rotations: Specifies the number of 90 degree rotations.
%
*/
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
ExceptionInfo *exception)
{
#define RotateImageTag "Rotate/Image"
CacheView
*image_view,
*rotate_view;
Image
*rotate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
/*
Initialize rotated image attributes.
*/
assert(image != (Image *) NULL);
page=image->page;
rotations%=4;
if (rotations == 0)
return(CloneImage(image,0,0,MagickTrue,exception));
if ((rotations == 1) || (rotations == 3))
rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
else
rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
/*
Integral rotate the image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
switch (rotations)
{
case 1:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 90 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows/tile_height,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
tile_x=0;
for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (y=0; y < (ssize_t) width; y++)
{
register const Quantum
*magick_restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
(rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+((height-1)*width+y)*GetPixelChannels(image);
for (x=0; x < (ssize_t) height; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,tile_pixels) <= (QuantumRange/2))
{
tile_pixels-=width*GetPixelChannels(image);
q+=GetPixelChannels(rotate_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(rotate_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
}
tile_pixels-=width*GetPixelChannels(image);
q+=GetPixelChannels(rotate_image);
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
break;
}
case 2:
{
register ssize_t
y;
/*
Rotate 180 degrees.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
1),image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
q+=GetPixelChannels(rotate_image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
q-=GetPixelChannels(rotate_image);
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(rotate_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rotate_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
proceed=SetImageProgress(image,RotateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
case 3:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 270 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows/tile_height,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
tile_x=0;
for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (y=0; y < (ssize_t) width; y++)
{
register const Quantum
*magick_restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
rotate_image->rows-(tile_x+width)),height,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+((width-1)-y)*GetPixelChannels(image);
for (x=0; x < (ssize_t) height; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,tile_pixels) <= (QuantumRange/2))
{
tile_pixels+=width*GetPixelChannels(image);
q+=GetPixelChannels(rotate_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(rotate_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
}
tile_pixels+=width*GetPixelChannels(image);
q+=GetPixelChannels(rotate_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
default:
break;
}
rotate_view=DestroyCacheView(rotate_view);
image_view=DestroyCacheView(image_view);
rotate_image->type=image->type;
rotate_image->page=page;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ X S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% XShearImage() shears the image in the X direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a vertical
% Y-axis. X shears will widen an image creating 'empty' triangles on the left
% and right sides of the source image.
%
% The format of the XShearImage method is:
%
% MagickBooleanType XShearImage(Image *image,const double degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A double representing the shearing angle along the X
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType XShearImage(Image *image,const double degrees,
const size_t width,const size_t height,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag "XShear/Image"
typedef enum
{
LEFT,
RIGHT
} ShearDirection;
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
background;
ssize_t
y;
/*
X shear image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
background=image->background_color;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,image,height,1)
#endif
for (y=0; y < (ssize_t) height; y++)
{
PixelInfo
pixel,
source,
destination;
double
area,
displacement;
register Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
i;
ShearDirection
direction;
ssize_t
step;
if (status == MagickFalse)
continue;
p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
exception);
if (p == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=x_offset*GetPixelChannels(image);
displacement=degrees*(double) (y-height/2.0);
if (displacement == 0.0)
continue;
if (displacement > 0.0)
direction=RIGHT;
else
{
displacement*=(-1.0);
direction=LEFT;
}
step=(ssize_t) floor((double) displacement);
area=(double) (displacement-step);
step++;
pixel=background;
GetPixelInfo(image,&source);
GetPixelInfo(image,&destination);
switch (direction)
{
case LEFT:
{
/*
Transfer pixels left-to-right.
*/
if (step > x_offset)
break;
q=p-step*GetPixelChannels(image);
for (i=0; i < (ssize_t) width; i++)
{
if ((x_offset+i) < step)
{
p+=GetPixelChannels(image);
GetPixelInfoPixel(image,p,&pixel);
q+=GetPixelChannels(image);
continue;
}
GetPixelInfoPixel(image,p,&source);
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&source,(double) GetPixelAlpha(image,p),area,&destination);
SetPixelViaPixelInfo(image,&destination,q);
GetPixelInfoPixel(image,p,&pixel);
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&background,(double) background.alpha,area,&destination);
SetPixelViaPixelInfo(image,&destination,q);
q+=GetPixelChannels(image);
for (i=0; i < (step-1); i++)
{
SetPixelViaPixelInfo(image,&background,q);
q+=GetPixelChannels(image);
}
break;
}
case RIGHT:
{
/*
Transfer pixels right-to-left.
*/
p+=width*GetPixelChannels(image);
q=p+step*GetPixelChannels(image);
for (i=0; i < (ssize_t) width; i++)
{
p-=GetPixelChannels(image);
q-=GetPixelChannels(image);
if ((size_t) (x_offset+width+step-i) > image->columns)
continue;
GetPixelInfoPixel(image,p,&source);
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&source,(double) GetPixelAlpha(image,p),area,&destination);
SetPixelViaPixelInfo(image,&destination,q);
GetPixelInfoPixel(image,p,&pixel);
}
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&background,(double) background.alpha,area,&destination);
q-=GetPixelChannels(image);
SetPixelViaPixelInfo(image,&destination,q);
for (i=0; i < (step-1); i++)
{
q-=GetPixelChannels(image);
SetPixelViaPixelInfo(image,&background,q);
}
break;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_XShearImage)
#endif
proceed=SetImageProgress(image,XShearImageTag,progress++,height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Y S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% YShearImage shears the image in the Y direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a
% horizontal X-axis. Y shears will increase the height of an image, creating
% 'empty' triangles on the top and bottom of the source image.
%
% The format of the YShearImage method is:
%
% MagickBooleanType YShearImage(Image *image,const double degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A double representing the shearing angle along the Y
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const double degrees,
const size_t width,const size_t height,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag "YShear/Image"
typedef enum
{
UP,
DOWN
} ShearDirection;
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
background;
ssize_t
x;
/*
Y shear image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
progress=0;
background=image->background_color;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,image,width,1)
#endif
for (x=0; x < (ssize_t) width; x++)
{
ssize_t
step;
double
area,
displacement;
PixelInfo
pixel,
source,
destination;
register Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
i;
ShearDirection
direction;
if (status == MagickFalse)
continue;
p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
exception);
if (p == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=y_offset*GetPixelChannels(image);
displacement=degrees*(double) (x-width/2.0);
if (displacement == 0.0)
continue;
if (displacement > 0.0)
direction=DOWN;
else
{
displacement*=(-1.0);
direction=UP;
}
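/*
  Split the displacement into a whole-pixel offset (step, computed as
  floor(displacement)+1) and a fractional remainder (area) that weights
  the blend between vertically adjacent source pixels.
*/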
step=(ssize_t) floor((double) displacement);
area=(double) (displacement-step);
step++;
pixel=background;
GetPixelInfo(image,&source);
GetPixelInfo(image,&destination);
switch (direction)
{
case UP:
{
/*
Transfer pixels top-to-bottom.
*/
if (step > y_offset)
break;
q=p-step*GetPixelChannels(image);
for (i=0; i < (ssize_t) height; i++)
{
if ((y_offset+i) < step)
{
p+=GetPixelChannels(image);
GetPixelInfoPixel(image,p,&pixel);
q+=GetPixelChannels(image);
continue;
}
GetPixelInfoPixel(image,p,&source);
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&source,(double) GetPixelAlpha(image,p),area,
&destination);
SetPixelViaPixelInfo(image,&destination,q);
GetPixelInfoPixel(image,p,&pixel);
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&background,(double) background.alpha,area,&destination);
SetPixelViaPixelInfo(image,&destination,q);
q+=GetPixelChannels(image);
for (i=0; i < (step-1); i++)
{
SetPixelViaPixelInfo(image,&background,q);
q+=GetPixelChannels(image);
}
break;
}
case DOWN:
{
/*
Transfer pixels bottom-to-top.
*/
p+=height*GetPixelChannels(image);
q=p+step*GetPixelChannels(image);
for (i=0; i < (ssize_t) height; i++)
{
p-=GetPixelChannels(image);
q-=GetPixelChannels(image);
if ((size_t) (y_offset+height+step-i) > image->rows)
continue;
GetPixelInfoPixel(image,p,&source);
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&source,(double) GetPixelAlpha(image,p),area,
&destination);
SetPixelViaPixelInfo(image,&destination,q);
GetPixelInfoPixel(image,p,&pixel);
}
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&background,(double) background.alpha,area,&destination);
q-=GetPixelChannels(image);
SetPixelViaPixelInfo(image,&destination,q);
for (i=0; i < (step-1); i++)
{
q-=GetPixelChannels(image);
SetPixelViaPixelInfo(image,&background,q);
}
break;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_YShearImage)
#endif
proceed=SetImageProgress(image,YShearImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearImage() creates a new image that is a sheared copy of an existing
% one. Shearing slides one edge of an image along the X or Y axis, creating
% a parallelogram. An X direction shear slides an edge along the X axis,
% while a Y direction shear slides an edge along the Y axis. The amount of
% the shear is controlled by a shear angle. For X direction shears, x_shear
% is measured relative to the Y axis, and similarly, for Y direction shears
% y_shear is measured relative to the X axis. Empty triangles left over from
% shearing the image are filled with the background color defined by member
% 'background_color' of the image. ShearImage() allocates the memory
% necessary for the new Image structure and returns a pointer to the new image.
%
% ShearImage() is based on the paper "A Fast Algorithm for General Raster
% Rotatation" by Alan W. Paeth.
%
% The format of the ShearImage method is:
%
% Image *ShearImage(const Image *image,const double x_shear,
% const double y_shear,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
% o exception: return any errors or warnings in this structure.
%
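% A minimal usage sketch (assumes 'image' and 'exception' were acquired
% elsewhere, e.g. with ReadImage(); error handling elided):
%
% Image *sheared = ShearImage(image, 10.0, 5.0, exception);
% if (sheared != (Image *) NULL)
% sheared = DestroyImage(sheared);
%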
*/
MagickExport Image *ShearImage(const Image *image,const double x_shear,
const double y_shear,ExceptionInfo *exception)
{
Image
*integral_image,
*shear_image;
MagickBooleanType
status;
PointInfo
shear;
RectangleInfo
border_info,
bounds;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
/*
Initialize shear angle.
*/
integral_image=CloneImage(image,0,0,MagickTrue,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
{
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
/*
Compute image size.
*/
bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
bounds.x=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
image->columns)/2.0-0.5);
bounds.y=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*bounds.width)-
image->rows)/2.0-0.5);
/*
Surround image with border.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) bounds.x;
border_info.height=(size_t) bounds.y;
shear_image=BorderImage(integral_image,&border_info,image->compose,exception);
integral_image=DestroyImage(integral_image);
if (shear_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Shear the image.
*/
if (shear_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel,exception);
status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
(ssize_t) (shear_image->rows-image->rows)/2,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
(shear_image->columns-bounds.width)/2,bounds.y,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
image->columns,(MagickRealType) image->rows,MagickFalse,exception);
shear_image->alpha_trait=image->alpha_trait;
shear_image->compose=image->compose;
shear_image->page.width=0;
shear_image->page.height=0;
if (status == MagickFalse)
shear_image=DestroyImage(shear_image);
return(shear_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearRotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. ShearRotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% ShearRotateImage() is based on the paper "A Fast Algorithm for General
% Raster Rotatation" by Alan W. Paeth. ShearRotateImage is adapted from a
% similar method based on the Paeth paper written by Michael Halle of the
% Spatial Imaging Group, MIT Media Lab.
%
% The format of the ShearRotateImage method is:
%
% Image *ShearRotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
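% A minimal usage sketch (assumes 'image' and 'exception' were acquired
% elsewhere; per the convention above, a positive angle rotates
% counter-clockwise):
%
% Image *rotated = ShearRotateImage(image, 30.0, exception);
% if (rotated != (Image *) NULL)
% rotated = DestroyImage(rotated);
%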
*/
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
ExceptionInfo *exception)
{
Image
*integral_image,
*rotate_image;
MagickBooleanType
status;
MagickRealType
angle;
PointInfo
shear;
RectangleInfo
border_info,
bounds;
size_t
height,
rotations,
shear_width,
width;
/*
Adjust rotation angle.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
angle=degrees-360.0*(ssize_t) (degrees/360.0);
if (angle < -45.0)
angle+=360.0;
for (rotations=0; angle > 45.0; rotations++)
angle-=90.0;
rotations%=4;
/*
Calculate shear equations.
*/
integral_image=IntegralRotateImage(image,rotations,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
shear.y=sin((double) DegreesToRadians(angle));
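/*
  Paeth's three-shear decomposition: a rotation by angle t factors as
    R(t) = ShearX(-tan(t/2)) * ShearY(sin(t)) * ShearX(-tan(t/2)),
  so the residual angle (at most 45 degrees in magnitude after the
  quarter turns above) is applied as an X, then Y, then X shear below.
*/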
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
{
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
/*
Compute maximum bounds for 3 shear operations.
*/
width=integral_image->columns;
height=integral_image->rows;
bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5);
bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5);
shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+
bounds.width+0.5);
bounds.x=(ssize_t) floor((double) ((shear_width > bounds.width) ? width :
bounds.width-shear_width+2)/2.0+0.5);
bounds.y=(ssize_t) floor(((double) bounds.height-height+2)/2.0+0.5);
/*
Surround image with a border.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) bounds.x;
border_info.height=(size_t) bounds.y;
rotate_image=BorderImage(integral_image,&border_info,image->compose,
exception);
integral_image=DestroyImage(integral_image);
if (rotate_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Rotate the image.
*/
status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t)
(rotate_image->rows-height)/2,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t)
(rotate_image->columns-bounds.width)/2,bounds.y,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t)
(rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows-
bounds.height)/2,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
(MagickRealType) height,MagickTrue,exception);
rotate_image->alpha_trait=image->alpha_trait;
rotate_image->compose=image->compose;
rotate_image->page.width=0;
rotate_image->page.height=0;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
|
declare_variant_messages.c | // RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
#pragma omp declare // expected-error {{expected an OpenMP directive}}
int foo(void);
#pragma omp declare variant // expected-error {{expected '(' after 'declare variant'}}
#pragma omp declare variant( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo // expected-error {{expected ')'}} expected-error {{expected 'match' clause on 'omp declare variant' directive}} expected-note {{to match this '('}}
#pragma omp declare variant(x) // expected-error {{use of undeclared identifier 'x'}} expected-error {{expected 'match' clause on}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) xxx // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) match // expected-error {{expected '(' after 'match'}}
#pragma omp declare variant(foo) match( // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match() // expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=yyy) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=yyy}) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(xxx={) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv, vvv}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv} xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv}) xxx // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(implementation={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'implementation'; selector ignored}} expected-note {{context selector options are: 'vendor' 'extension' 'unified_address' 'unified_shared_memory' 'reverse_offload' 'dynamic_allocators' 'atomic_default_mem_order'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor}) // expected-warning {{the context selector 'vendor' in context set 'implementation' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}}
#pragma omp declare variant(foo) match(implementation={vendor(score ibm)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}}
#pragma omp declare variant(foo) match(implementation={vendor(score( ibm)}) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(2 ibm)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), vendor(llvm)}) // expected-warning {{the context selector 'vendor' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'vendor' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), kind(cpu)}) // expected-warning {{the context selector 'kind' is not valid for the context set 'implementation'; selector ignored}} expected-note {{the context selector 'kind' can be nested in the context set 'device'; try 'match(device={kind(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'device'; selector ignored}} expected-note {{context selector options are: 'kind' 'isa' 'arch'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind}) // expected-warning {{the context selector 'kind' in context set 'device' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}}
#pragma omp declare variant(foo) match(device={kind(score cpu)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<invalid>'); score ignored}}
#pragma omp declare variant(foo) match(device = {kind(score(ibm) }) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<recovery-expr>()'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind(score(2 gpu)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('2'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('foo()'); score ignored}} expected-warning {{'ibm' is not a valid context property for the context selector 'kind' and the context set 'device'; property ignored}} expected-note {{try 'match(implementation={vendor(ibm)})'}} expected-note {{the ignored property spans until here}}
#pragma omp declare variant(foo) match(device={kind(score(5): host), kind(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'kind' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'kind' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind(score(5): nohost), vendor(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'vendor' is not valid for the context set 'device'; selector ignored}} expected-note {{the context selector 'vendor' can be nested in the context set 'implementation'; try 'match(implementation={vendor(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={extension("aaa")}) // expected-warning {{'aaa' is not a valid context property for the context selector 'extension' and the context set 'implementation'; property ignored}} expected-note {{context property options are: 'match_all' 'match_any' 'match_none'}} expected-note {{the ignored property spans until here}}
int bar(void);
#pragma omp declare variant(foo) match(implementation = {vendor(score(foo) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation = {vendor(score(foo()) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation = {vendor(score(<expr>) :llvm)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}}
#pragma omp declare variant(foo) match(user = {condition(foo)}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo is not}}
#pragma omp declare variant(foo) match(user = {condition(foo())}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo() is not}}
#pragma omp declare variant(foo) match(user = {condition(<expr>)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}} expected-note {{the ignored selector spans until here}}
int score_and_cond_non_const();
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int a; // expected-error {{'#pragma omp declare variant' can only be applied to functions}}
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp threadprivate(a) // expected-error {{'#pragma omp declare variant' can only be applied to functions}}
int var;
#pragma omp threadprivate(var)
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare // expected-error {{expected an OpenMP directive}}
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma options align=packed
int main();
#pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma init_seg(compiler)
int main();
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{single declaration is expected after 'declare variant' directive}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int b, c;
int no_proto();
#pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int no_proto_too();
int proto1(int);
#pragma omp declare variant(proto1) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int diff_proto(); // expected-note {{previous declaration is here}}
int diff_proto(double); // expected-error {{conflicting types for 'diff_proto'}}
#pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int diff_proto1(double);
int after_use_variant(void);
int after_use();
int bar() {
return after_use();
}
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied for function after first usage; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int after_use(void);
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int defined(void) { return 0; }
int defined1(void) { return 0; }
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied to the function that was defined already; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int defined1(void);
int diff_cc_variant(void);
#pragma omp declare variant(diff_cc_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'int (void) __attribute__((vectorcall))'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
__vectorcall int diff_cc(void);
int diff_ret_variant(void);
#pragma omp declare variant(diff_ret_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'void (void)'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
void diff_ret(void);
void marked(void);
void not_marked(void);
#pragma omp declare variant(not_marked) match(implementation={vendor(unknown)}, device={kind(cpu)}) // expected-note {{marked as 'declare variant' here}}
void marked_variant(void);
#pragma omp declare variant(marked_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{variant function in '#pragma omp declare variant' is itself marked as '#pragma omp declare variant'}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
void marked(void);
#pragma omp declare variant(foo) match(device = {isa("foo")})
int unknown_isa_trait(void);
#pragma omp declare variant(foo) match(device = {isa(foo)})
int unknown_isa_trait2(void);
#pragma omp declare variant(foo) match(device = {kind(fpga), isa(bar)})
int ignored_isa_trait(void);
void caller() {
unknown_isa_trait(); // expected-warning {{isa trait 'foo' is not known to the current target; verify the spelling or consider restricting the context selector with the 'arch' selector further}}
unknown_isa_trait2(); // expected-warning {{isa trait 'foo' is not known to the current target; verify the spelling or consider restricting the context selector with the 'arch' selector further}}
ignored_isa_trait();
}
#pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}}
// FIXME: If the scores are equivalent we should detect that and allow it.
#pragma omp begin declare variant match(implementation = {vendor(score(2) \
: llvm)})
#pragma omp declare variant(foo) match(implementation = {vendor(score(2) \
: llvm)}) // expected-error@-1 {{nested OpenMP context selector contains duplicated trait 'llvm' in selector 'vendor' and set 'implementation' with different score}}
int conflicting_nested_score(void);
#pragma omp end declare variant
// FIXME: We should build the conjunction of different conditions, see also the score fixme above.
#pragma omp begin declare variant match(user = {condition(1)})
#pragma omp declare variant(foo) match(user = {condition(1)}) // expected-error {{nested user conditions in OpenMP context selector not supported (yet)}}
int conflicting_nested_condition(void);
#pragma omp end declare variant
|
GB_binop__isne_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int8)
// A*D function (colscale): GB (_AxD__isne_int8)
// D*A function (rowscale): GB (_DxB__isne_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int8)
// C=scalar+B GB (_bind1st__isne_int8)
// C=scalar+B' GB (_bind1st_tran__isne_int8)
// C=A+scalar GB (_bind2nd__isne_int8)
// C=A'+scalar GB (_bind2nd_tran__isne_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij != bij)
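// (The IS* comparators such as ISNE return their result in the operand
// type, int8_t here, whereas the NE comparator returns bool.)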
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
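// For example, GB_BINOP (z, 3, 5, i, j) expands to z = (3 != 5), which
// assigns 1; the i and j indices are ignored by this operator.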
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT8 || GxB_NO_ISNE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isne_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isne_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isne_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isne_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isne_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
syrk.c | /**
* syrk.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include "BenchmarksUtil.h"
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
/* Problem size */
#define N SIZE
#define M SIZE
/* Declared constant values for alpha and beta */
/* (same as values in PolyBench 2.0) */
#define alpha 12435
#define beta 4546
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array_A(DATA_TYPE *A) {
int i, j;
for (i = 0; i < N; i++) {
for (j = 0; j < M; j++) {
A[i * M + j] = ((DATA_TYPE)i * j) / N;
}
}
}
void init_array_C(DATA_TYPE *C) {
int i, j;
for (i = 0; i < N; i++) {
for (j = 0; j < M; j++) {
C[i * M + j] = ((DATA_TYPE)i * j + 2) / N;
}
}
}
int compareResults(DATA_TYPE *C, DATA_TYPE *C_OMP) {
int i, j, fail;
fail = 0;
// Compare the sequential result C against the OMP result C_OMP
for (i = 0; i < N; i++) {
for (j = 0; j < M; j++) {
if (percentDiff(C[i * M + j], C_OMP[i * M + j]) > ERROR_THRESHOLD) {
fail++;
}
}
}
return fail;
}
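// Sequential reference kernel: computes the symmetric rank-k update
// C := alpha*A*A^T + beta*C. Since N == M == SIZE, the mixed use of N
// and M in the index arithmetic below addresses the same elements.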
void syrk(DATA_TYPE *A, DATA_TYPE *C) {
int i, j, k;
for (i = 0; i < N; i++) {
for (j = 0; j < M; j++) {
C[i * M + j] *= beta;
}
}
for (i = 0; i < N; i++) {
for (j = 0; j < M; j++) {
for (k = 0; k < M; k++) {
C[i * N + j] += alpha * A[i * M + k] * A[j * M + k];
}
}
}
}
void syrkOMP(DATA_TYPE *A, DATA_TYPE *C) {
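// Offload both loop nests in a single target region: A is device
// read-only (map(to)) while C is copied to the device and back
// (map(tofrom)).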
#pragma omp target teams map(to : A[:N*M]) map(tofrom : C[:N*M]) device(OMP_DEVICE_ID)
{
#pragma omp distribute parallel for collapse(2)
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
C[i * M + j] *= beta;
}
}
#pragma omp distribute parallel for collapse(2)
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
for (int k = 0; k < M; k++) {
C[i * N + j] += alpha * A[i * M + k] * A[j * M + k];
}
}
}
}
}
int main() {
fprintf(stdout, "<< Symmetric rank-k operations >>\n");
// declare arrays and allocate memory for common arrays
DATA_TYPE *A = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE));
DATA_TYPE *C = NULL;
DATA_TYPE *C_OMP = NULL;
// init array A
init_array_A(A);
// run OMP on GPU or CPU if enabled
#if defined(RUN_OMP_GPU) || defined(RUN_OMP_CPU)
C_OMP = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE));
init_array_C(C_OMP);
BENCHMARK_OMP(syrkOMP(A, C_OMP));
// prevent dead-code elimination
DCE_PREVENT(C_OMP, N*M);
#endif
// run sequential version if enabled
#ifdef RUN_CPU_SEQ
C = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE));
init_array_C(C);
BENCHMARK_CPU(syrk(A, C));
// prevent dead-code elimination
DCE_PREVENT(C, N*M);
#endif
int fail = 0;
// if TEST is enabled, then compare OMP results against sequential mode
#ifdef RUN_TEST
fail = compareResults(C, C_OMP);
printf("Errors on OMP (threshold %4.2lf): %d\n", ERROR_THRESHOLD, fail);
#endif
// release memory
free(A);
free(C);
free(C_OMP);
return fail;
}
|
GB_unop__abs_bool_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com. See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_bool_bool
// op(A') function: GB_unop_tran__abs_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__abs_bool_bool
(
bool *Cx, // Cx and Ax may be aliased
const bool *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
bool z = aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
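// Minimal usage sketch (illustrative, not part of the generated file; the
// caller owns the arrays and chooses nthreads):
//
//    bool Ax [4] = { true, false, true, true } ;
//    bool Cx [4] ;
//    GrB_Info info = GB_unop_apply__abs_bool_bool (Cx, Ax, 4, 2) ;
//    // abs on bool is the identity cast, so Cx matches Ax elementwise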
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__abs_bool_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
// HLSL Change Starts
#include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependant
namespace hlsl {
struct UnusualAnnotation;
}
// HLSL Change Ends
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
class InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class AttributeList;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class ExternalSemaSource;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPClause;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
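// Usage sketch (illustrative caller; in practice the FileID comes from a
// SourceManager):
//
//   FileNullabilityMap NullMap;
//   FileNullability &FN = NullMap[FID]; // first access fills the cache
//   FN.SawTypeNullability = true;       // later NullMap[FID] hits the cache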
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///\brief Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///\brief Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
// We are about to link these. It is now safe to compute the linkage of
// the new decl. If the new decl has external linkage, we will
// link it with the hidden decl (which also has external linkage) and
// it will keep having external linkage. If it has internal linkage, we
// will not link it. Since it has no previous decls, it will remain
// with internal linkage.
if (getLangOpts().ModulesHideInternalLinkage)
return isVisible(Old) || New->isExternallyVisible();
return true;
}
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// \brief Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
/// PackContext - Manages the stack for \#pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// \brief Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
// HLSL Change Begin - pragma pack_matrix.
// Add both row/col to identify the default case which no pragma.
bool PackMatrixRowMajorPragmaOn = false; // True when \#pragma pack_matrix(row_major) on.
bool PackMatrixColMajorPragmaOn = false; // True when \#pragma pack_matrix(column_major) on.
// HLSL Change End.
enum PragmaVtorDispKind {
PVDK_Push, ///< #pragma vtordisp(push, mode)
PVDK_Set, ///< #pragma vtordisp(mode)
PVDK_Pop, ///< #pragma vtordisp(pop)
PVDK_Reset ///< #pragma vtordisp()
};
enum PragmaMsStackAction {
PSK_Reset, // #pragma ()
PSK_Set, // #pragma ("name")
PSK_Push, // #pragma (push[, id])
PSK_Push_Set, // #pragma (push[, id], "name")
PSK_Pop, // #pragma (pop[, id])
PSK_Pop_Set, // #pragma (pop[, id], "name")
};
/// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
///
/// The stack always has at least one element in it.
SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// \brief Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
Slot(llvm::StringRef StackSlotLabel,
ValueType Value,
SourceLocation PragmaLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
explicit PragmaStack(const ValueType &Value)
: CurrentValue(Value) {}
SmallVector<Slot, 2> Stack;
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
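// Example (sketch): how the PragmaMsStackAction values above map onto the
// MS segment pragmas tracked by the stacks below (label and section names
// are illustrative):
//
//   #pragma data_seg(".my_data")              -> PSK_Set
//   #pragma data_seg(push, ".my_data")        -> PSK_Push_Set
//   #pragma data_seg(push, L1, ".my_data")    -> PSK_Push_Set, label "L1"
//   #pragma data_seg(pop, L1)                 -> PSK_Pop, unwinds to "L1"
//   #pragma data_seg()                        -> PSK_Reset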
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// ExprNeedsCleanups - True if the current evaluation context
/// requires cleanups to be run at its conclusion.
bool ExprNeedsCleanups;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// \brief Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// \brief Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// \brief Delete-expressions to be analyzed at the end of the translation unit.
///
/// This list maps class members to the locations of delete-expressions
/// for which it could not yet be proven whether they mismatch the
/// new-expression used in the field's initializer.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// \brief Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// \brief All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedExceptionSpecChecks;
/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// \brief The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
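// Usage sketch (illustrative; mirrors the push/pop protocol above, with
// DelayedDiagnosticPool as declared in DelayedDiagnostic.h):
//
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   Sema::DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
//   // ... parse a declaration; access/deprecation diagnostics land in Pool
//   S.DelayedDiagnostics.popWithoutEmitting(State);
//   // the caller then decides whether to emit what Pool collected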
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
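// Example (sketch): temporarily enter another DeclContext, restoring the
// previous context (and the 'this' type override) on scope exit:
//
//   {
//     Sema::ContextRAII SavedContext(SemaRef, NewDC);
//     // ... S.CurContext is NewDC here ...
//   } // destructor runs pop()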
/// \brief RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC)
{
S.PushFunctionScope();
S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
}
~SynthesizedFunctionScope() {
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers that appear in
/// \#pragma weak before being declared. Rare; they may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// \brief Caches identifiers/selectors for NSFoundation APIs.
// std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// \brief The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// \brief Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// \brief The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// \brief id<NSCopying> type.
QualType QIDNSCopying;
/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// \brief counter for internal MS Asm label names.
unsigned MSAsmLabelNameCounter;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields, such as the operand of a SIZE operator in
/// MS-style inline assembly.
bool AllowAbstractFieldReference;
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// \brief The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// \brief The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// \brief The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// \brief The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
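// Illustrative mapping of source forms to the contexts above (sketch):
//
//   sizeof(e)                  -> Unevaluated
//   SIZE e (MS inline asm)     -> UnevaluatedAbstract
//   case e:                    -> ConstantEvaluated
//   f(e)                       -> PotentiallyEvaluated
//   void f(int x = e);         -> PotentiallyEvaluatedIfUsed (for e)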
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// \brief The expression evaluation context.
ExpressionEvaluationContext Context;
/// \brief Whether the enclosing context needed a cleanup.
bool ParentNeedsCleanups;
/// \brief Whether we are in a decltype expression.
bool IsDecltype;
/// \brief The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// \brief The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// \brief The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// \brief The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// \brief The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;
/// \brief If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// \brief If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
bool ParentNeedsCleanups,
Decl *ManglingContextDecl,
bool IsDecltype)
: Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }
/// \brief Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == Unevaluated || Context == UnevaluatedAbstract;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// \brief Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;
/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
void ReadMethodPool(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// \brief Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema& S)
: S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
~FPContractStateRAII() {
S.FPFeatures.fp_contract = OldFPContractState;
}
private:
Sema& S;
bool OldFPContractState : 1;
};
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// \brief Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
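// Usage sketch (the diagnostic ID is illustrative):
//
//   S.Diag(Loc, diag::err_typecheck_invalid_operands)
//       << LHSType << RHSType << SourceRange(LLoc, RLoc);
//
// The friend operator<< defined above keeps the SemaDiagnosticBuilder type
// while forwarding each argument to the underlying DiagnosticBuilder.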
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// \brief Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// \brief Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// \brief This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const {
if (FunctionScopes.empty())
return nullptr;
for (int e = FunctionScopes.size()-1; e >= 0; --e) {
if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
continue;
return FunctionScopes[e];
}
return nullptr;
}
template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
if (!isUnevaluatedContext())
getCurFunction()->recordUseOfWeak(E, IsRead);
}
void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// \brief Retrieve the current lambda scope info, if any.
sema::LambdaScopeInfo *getCurLambda();
/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
unsigned deduceWeakPropertyFromType(QualType T) {
if ((getLangOpts().getGC() != LangOptions::NonGC &&
T.isObjCGCWeak()) ||
(getLangOpts().ObjCAutoRefCount &&
T.getObjCLifetime() == Qualifiers::OCL_Weak))
return ObjCDeclSpec::DQ_PR_weak;
return 0;
}
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc,
bool *MissingExceptionSpecification = nullptr,
bool *MissingEmptyExceptionSpecification = nullptr,
bool AllowNoexceptAllMatchWithNoSpec = false,
bool IsOperatorNew = false);
bool CheckExceptionSpecSubset(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Superset, SourceLocation SuperLoc,
const FunctionProtoType *Subset, SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
const FunctionProtoType *Target, SourceLocation TargetLoc,
const FunctionProtoType *Source, SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
bool Suppressed;
TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
if (Suppressed)
return;
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
VisibleModuleSet VisibleModules;
llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack;
Module *CachedFakeTopLevelModule;
public:
/// \brief Get the module owning an entity.
Module *getOwningModule(Decl *Entity);
/// \brief Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);
bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
bool hasVisibleMergedDefinition(NamedDecl *Def);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
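// Usage sketch for the variadic Require* helpers above (diagnostic ID
// illustrative): extra arguments are bound into a BoundTypeDiagnoser and
// streamed into the diagnostic only if the check fails:
//
//   if (RequireCompleteType(Loc, T, diag::err_incomplete_type))
//     return true; // type was incomplete; diagnostic already emitted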
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
/// function to pin them on. ActOnFunctionDeclarator reads this list and patches
/// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope;
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false,
bool HasTrailingDot = false,
ParsedType ObjectType = ParsedType(),
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool AllowClassTemplates = false);
/// \brief For compatibility with MSVC, we delay parsing of some default
/// template type arguments until instantiation time. Emits a warning and
/// returns a synthesized DependentNameType that isn't really dependent on any
/// other template arguments.
ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
SourceLocation NameLoc);
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
const IdentifierInfo *Keyword;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword)
: Kind(NC_Keyword), Keyword(Keyword) { }
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
// HLSL Change Starts
// This enumeration is used to determine whether a variable declaration
// should shadow a prior declaration rather than merging.
enum ShadowMergeState {
ShadowMergeState_Disallowed, // shadowing is not allowed
ShadowMergeState_Possible, // shadowing is possible (but may not occur)
ShadowMergeState_Effective // the declaration should shadow a prior one
};
// HLSL Change Ends
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
void CheckVariableDeclarationType(VarDecl *NewVD);
void CheckCompleteVariableDeclaration(VarDecl *var);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsExplicitSpecialization);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SCm, hlsl::ParameterModifier ParamMod); // HLSL Change
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
bool TypeMayContainAuto = true);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(FunctionDecl *FD,
const FunctionDecl *EffectiveDefinition =
nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// \brief Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineMethodDef(CXXMethodDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
ParmVarDecl * const *End);
/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
ParmVarDecl * const *End,
QualType ReturnTy,
NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
AttributeList *AttrList,
SourceLocation SemiLoc);
/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument
};
/// \brief Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
bool NeedDefinition, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation = false);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
struct SkipBodyInfo {
SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
bool ShouldSkip;
NamedDecl *Previous;
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
AttributeList *MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields,
SourceLocation LBrac, SourceLocation RBrac,
AttributeList *AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
typedef void *SkippedDefinitionContext;
/// \brief Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceLocation RBraceLoc);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDecl,
ArrayRef<Decl *> Elements,
Scope *S, AttributeList *Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool Override,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// \brief Don't merge availability attributes at all.
AMK_None,
/// \brief Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// \brief Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override
};
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous, ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld, ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
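/// Illustrative mapping of the three outcomes onto plain C++ (sketch only):
/// \code
///   void f(int);
///   void f(double);  // different signature from f(int):  Ovl_Overload
///   void f(int);     // exactly matches an existing f:    Ovl_Match
///   // If lookup for a new 'f' instead finds a non-function
///   // (say, a variable named f), the result is Ovl_NonFunction.
/// \endcode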
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr ///< Constant expression in a noptr-new-declarator.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
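/// Minimal sketch of checking a case label (CaseExpr and CondType are
/// hypothetical names assumed to be in scope; illustration only):
/// \code
///   llvm::APSInt Val;
///   ExprResult Converted =
///       CheckConvertedConstantExpression(CaseExpr, CondType, Val,
///                                        CCEK_CaseValue);
///   if (!Converted.isInvalid())
///     ; // Val now holds the evaluated case value
/// \endcode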
/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// \brief Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// \brief Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
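/// Minimal sketch of a concrete diagnoser (SomeDiagID is a placeholder, not
/// a real diagnostic ID; S, Loc, and E are assumed to be in scope):
/// \code
///   struct MyDiagnoser : Sema::ICEConvertDiagnoser {
///     MyDiagnoser()
///         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
///                               /*Suppress=*/false,
///                               /*SuppressConversion=*/false) {}
///     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
///                                          QualType T) override {
///       return S.Diag(Loc, SomeDiagID) << T;
///     }
///     // ...the remaining pure virtual diagnose/note callbacks must also
///     // be overridden before this type can be instantiated...
///   } Diagnoser;
///   ExprResult R = S.PerformContextualImplicitConversion(Loc, E, Diagnoser);
/// \endcode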
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
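/// For reference, the Objective-C literal forms these kinds classify
/// (illustration):
/// \code
///   @[ a, b ]    // LK_Array
///   @{ k : v }   // LK_Dictionary
///   @42          // LK_Numeric
///   @(expr)      // LK_Boxed
///   @"string"    // LK_String
/// \endcode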
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());
// Emit, as a series of notes, all template and non-template overload
// candidates identified by the expression E.
void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
const SourceRange& OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
// An enum to represent whether something is dealing with a call to begin()
// or a call to end() in a range-based for loop.
enum BeginEndFunction {
BEF_begin,
BEF_end
};
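/// For orientation, the calls being built correspond to the standard
/// range-based for desugaring (sketch):
/// \code
///   for (auto x : r) body;
///   // becomes, roughly:
///   //   auto __begin = begin-expr;  // built with BEF_begin
///   //   auto __end   = end-expr;    // built with BEF_end
///   //   for (; __begin != __end; ++__begin) { auto x = *__begin; body; }
/// \endcode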
ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
SourceLocation RangeLoc,
VarDecl *Decl,
BeginEndFunction BEF,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param,
ParmVarDecl *const *ParamEnd,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// @brief Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up any declaration with any name.
LookupAnyName
};
/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// \brief The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// \brief The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists.
ForRedeclaration
};
/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// \brief The lookup resulted in an error.
LOLR_Error,
/// \brief The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// \brief The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
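/// The outcomes correspond to the literal-operator declaration forms, e.g.
/// (illustration):
/// \code
///   long double operator"" _deg(long double);    // 'cooked' -> LOLR_Cooked
///   int operator"" _raw(const char *);           // 'raw'    -> LOLR_Raw
///   template <char...> int operator"" _bits();   // LOLR_Template
/// \endcode
/// LOLR_StringTemplate corresponds to the string-literal operator template
/// extension, which additionally receives the character type.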
SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT;
TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT;
};
/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
DeclAccessPair Operator,
QualType T1, QualType T2);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// \brief Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
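/// Minimal sketch of the Filter mechanism (E is assumed to be in scope;
/// illustration only): reject rebuilds that are not calls, so that other
/// correction combinations are tried.
/// \code
///   ExprResult Fixed = CorrectDelayedTyposInExpr(
///       E, /*InitDecl=*/nullptr, [](Expr *Candidate) -> ExprResult {
///         if (isa<CallExpr>(Candidate))
///           return Candidate;  // acceptable rebuilt expression
///         return ExprError();  // try a different combination
///       });
/// \endcode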
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
unsigned ArgNum, StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param nullabilityLoc The location of the nullability specifier.
///
/// \param isContextSensitive Whether this nullability specifier was
/// written as a context-sensitive keyword (in an Objective-C
/// method) or an Objective-C property attribute, rather than as an
/// underscored type specifier.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability,
SourceLocation nullabilityLoc,
bool isContextSensitive);
/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method's
/// implementation declaration exactly matches its interface declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar
/// which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
bool *isOverridingProperty,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one, but not the other, user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// \brief - Returns instance or factory methods in the global method pool for
/// the given selector. If no such method or only one method is found, the
/// function returns false; otherwise, it returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool instance);
bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R,
bool receiverIdOrClass);
void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
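// A minimal sketch of how these pool queries compose (not from the original
// source; assumes a Sema instance `S`, a selector `Sel`, and a source
// range `R`):
//
//   SmallVector<ObjCMethodDecl *, 4> Methods;
//   if (S.CollectMultipleMethodsInGlobalPool(Sel, Methods, /*instance=*/true))
//     S.DiagnoseMultipleMethodInGlobalPool(Methods, Sel, R,
//                                          /*receiverIdOrClass=*/false);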
private:
/// \brief - Returns the method that best matches the given argument list,
/// or nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance);
/// \brief Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
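// A minimal usage sketch (not from the original source; assumes a Sema
// instance `S`, a parsed condition `CondExpr`, its location `WhileLoc`, and
// a body `Body`):
//
//   Sema::FullExprArg Cond = S.MakeFullExpr(CondExpr);
//   StmtResult While = S.ActOnWhileStmt(WhileLoc, Cond,
//                                       /*CondVar=*/nullptr, Body);
//
// MakeFullExpr finishes the full-expression first (ActOnFinishFullExpr), so
// cleanups for temporaries created while building the condition are attached
// before the statement is formed.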
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// \brief A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S): S(S) {
S.ActOnStartOfCompoundStmt();
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
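// A minimal usage sketch (hypothetical caller; assumes a Sema instance `S`):
//
//   {
//     Sema::CompoundScopeRAII CompoundScope(S);
//     // ... act on the statements of the compound body ...
//   } // ActOnFinishOfCompoundStmt() runs here, even on early exit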
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
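// A minimal usage sketch (hypothetical; assumes a Sema instance `S` that has
// already pushed a function scope):
//
//   Sema::FunctionScopeRAII PopScopeOnError(S);
//   if (/* ...building the body failed... */)
//     return StmtError();        // destructor pops the function scope
//   PopScopeOnError.disable();   // success: leave the scope in place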
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
StmtResult ActOnIfStmt(SourceLocation IfLoc,
FullExprArg CondVal, Decl *CondVar,
Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Expr *Cond,
Decl *CondVar);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
FullExprArg Cond,
Decl *CondVar, Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc,
SourceLocation CondLParen, Expr *Cond,
SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First, FullExprArg Second,
Decl *SecondVar,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
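// A minimal sketch of the BFRK_Check mode described above (hypothetical
// caller; ForLoc, LoopVar, ColonLoc, Collection, and RParenLoc are assumed
// to be in scope):
//
//   StmtResult Probe = S.ActOnCXXForRangeStmt(ForLoc, LoopVar, ColonLoc,
//                                             Collection, RParenLoc,
//                                             Sema::BFRK_Check);
//   if (!Probe.isInvalid()) {
//     // A for-range statement could be built; redo it for real with
//     // BFRK_Build. No irreversible actions were taken by the check.
//   }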
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *BeginEndDecl,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowFunctionParameters);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
bool AllowFunctionParameters);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
llvm::InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
/// null statement as its \p Body, and the two are located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
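// A minimal pairing sketch (hypothetical parser-side caller; `S` is a Sema
// instance):
//
//   Sema::ParsingClassState State = S.PushParsingClass();
//   // ... process the class body while certain diagnostics are delayed ...
//   S.PopParsingClass(State);  // restore the previous diagnostic state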
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial };
void EmitAvailabilityWarning(AvailabilityDiagnostic AD,
NamedDecl *D, StringRef Message,
SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
bool ObjCPropertyAccess);
bool makeUnavailableInSystemHeader(SourceLocation loc,
StringRef message);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
bool ObjCPropertyAccess=false);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
ReuseLambdaContextDecl_t,
bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool OdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
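// A minimal sketch of the check-only mode described above (hypothetical
// caller; `S`, `Var`, and `Loc` are assumed to be in scope):
//
//   QualType CaptureType, DeclRefType;
//   bool CannotCapture = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);
//   // If CannotCapture is false, CaptureType and DeclRefType are valid; no
//   // captures were added and no diagnostics were emitted.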
/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult BuildQualifiedDeclarationNameExpr(
CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
const SourceRange &ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// \brief Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
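// A minimal sketch of filling components for __builtin_offsetof(T, a.b)
// (hypothetical caller; IdA and IdB are assumed IdentifierInfo pointers, and
// the source locations are elided):
//
//   OffsetOfComponent Comps[2];
//   Comps[0].isBrackets = false;
//   Comps[0].U.IdentInfo = IdA;   // the field 'a'
//   Comps[1].isBrackets = false;
//   Comps[1].U.IdentInfo = IdB;   // the nested field 'b'
//   // Pass Comps and 2 as CompPtr/NumComponents to BuildBuiltinOffsetOf or
//   // ActOnBuiltinOffsetOf (declared below).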
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// \brief The symbol exists.
IER_Exists,
/// \brief The symbol does not exist.
IER_DoesNotExist,
/// \brief The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// \brief An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
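// A minimal dispatch sketch over the result (hypothetical caller; CurScope,
// SS, and TargetNameInfo are assumed to be in scope):
//
//   switch (S.CheckMicrosoftIfExistsSymbol(CurScope, SS, TargetNameInfo)) {
//   case Sema::IER_Exists:       /* enter the __if_exists body */    break;
//   case Sema::IER_DoesNotExist: /* skip the body */                 break;
//   case Sema::IER_Dependent:    /* build a dependent exists stmt */ break;
//   case Sema::IER_Error:        /* an error was already emitted */  break;
//   }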
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
// HLSL Change Starts
//===---------------------------- HLSL Features -------------------------===//
/// cbuffer/tbuffer
llvm::SmallVector<Decl*, 1> HLSLBuffers;
Decl* ActOnStartHLSLBuffer(Scope* bufferScope, bool cbuffer, SourceLocation KwLoc,
IdentifierInfo *Ident, SourceLocation IdentLoc,
std::vector<hlsl::UnusualAnnotation *>& BufferAttributes,
SourceLocation LBrace);
void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace);
Decl* getActiveHLSLBuffer() const;
void ActOnStartHLSLBufferView();
bool IsOnHLSLBufferView();
Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc,
DeclGroupPtrTy &dcl, bool iscbuf);
// HLSL Change Ends
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc,
IdentifierInfo *Ident,
SourceLocation LBrace,
AttributeList *AttrList);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
CXXRecordDecl *getStdBadAlloc() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const CXXConstructorDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
AttributeList *AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
DeclarationNameInfo NameInfo,
AttributeList *AttrList,
bool IsInstantiation,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
bool HasUsingKeyword,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
AttributeList *AttrList,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
Decl *ActOnAliasDeclaration(Scope *CurScope,
AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc,
UnqualifiedId &Name,
AttributeList *AttrList,
TypeResult Type,
Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// \brief Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(ComputedEST != EST_ComputedNoexcept &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// \brief The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// \brief The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// \brief Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// \brief Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
// C++11 [except.spec]p14:
// The exception-specification is noexcept(false) if the set of
// potential exceptions of the special member function contains "any"
ESI.Type = EST_ComputedNoexcept;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
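// A minimal sketch of driving the collector (hypothetical; `S` is a Sema
// instance and `SubobjectDtors` a pre-computed list of base/member
// destructors the implicit member would invoke):
//
//   Sema::ImplicitExceptionSpecification Spec(S);
//   for (const CXXMethodDecl *Dtor : SubobjectDtors)
//     Spec.CalledDecl(Loc, Dtor);
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();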
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// \brief Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// \brief Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// \brief Declare all inheriting constructors for the given class.
///
/// \param ClassDecl The class declaration into which the inheriting
/// constructors will be added.
void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr wrapping
/// it. Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
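// A minimal usage sketch (hypothetical; `S` is a Sema instance and `Record`
// the class declaration whose members are being processed):
//
//   {
//     Sema::CXXThisScopeRAII ThisScope(S, Record, /*CXXThisTypeQuals=*/0);
//     // 'this' may be used here, e.g. while handling a delayed exception
//     // specification of a member function.
//   } // the previous CXXThisTypeOverride is restored on scope exit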
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if an error occurred, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr);
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer,
bool TypeMayContainAuto = true);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType, bool IsArray,
MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete);
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
DeclarationName Name, MultiExprArg Args,
DeclContext *Ctx,
bool AllowMissing, FunctionDecl *&Operator,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
QualType Param1,
QualType Param2 = QualType(),
bool addRestrictAttr = false);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
DeclarationName Name);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
bool ConvertToBoolean);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the expression trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
ExprResult ActOnFinishFullExpr(Expr *Expr) {
return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
: SourceLocation());
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// \brief The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
SourceLocation IdLoc,
IdentifierInfo &II,
ParsedType ObjectType);
bool BuildCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
QualType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param Identifier The identifier preceding the '::'.
///
/// \param IdentifierLoc The location of the identifier.
///
/// \param CCLoc The location of the '::'.
///
/// \param ObjectType The type of the object, if we're parsing
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, this method is being called to improve
/// error recovery; in that case, no error messages are emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \returns true if an error occurred, false otherwise.
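/// For instance, while parsing the following, this is invoked once for
/// 'N::' and once for 'S::':
/// \code
/// namespace N { struct S { static int x; }; }
/// int y = N::S::x;
/// \endcode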
bool ActOnCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
ParsedType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation ColonLoc,
ParsedType ObjectType,
bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
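/// For instance, 'Outer<int>::' in the following is handled here:
/// \code
/// template<typename T> struct Outer { struct Inner { static int x; }; };
/// int n = Outer<int>::Inner::x;
/// \endcode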
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
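/// For example, once 'S::' below has been parsed, 'T' is looked up in the
/// scope of 'S':
/// \code
/// struct S { typedef int T; void f(T); };
/// void S::f(T) {}
/// \endcode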
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope() is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
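/// For example, 'a' below is found in the scope of 'X' while parsing the
/// initializer of 'X::b':
/// \code
/// struct X { static const int a = 1; static const int b; };
/// const int X::b = a;
/// \endcode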
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params);
/// \brief Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// \brief Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
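/// For example, in the C++14 init-capture below, 'n' undergoes an
/// lvalue-to-rvalue conversion when initializing 'm':
/// \code
/// void g() {
///   int n = 0;
///   auto l = [m = n] { return m; };
/// }
/// \endcode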
QualType performLambdaInitCaptureInitialization(SourceLocation Loc,
bool ByRef, IdentifierInfo *Id, Expr *&Init);
/// \brief Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType, IdentifierInfo *Id, Expr *Init);
/// \brief Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// \brief Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the expression needed to perform the conversion, and IR generation
/// actually generates the real body of the function pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
Expr **Strings,
unsigned NumStrings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. The type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
ObjCDictionaryElement *Elements,
unsigned NumElements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access,
SourceLocation ASLoc,
SourceLocation ColonLoc,
AttributeList *Attrs = nullptr);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
Decl *TagDecl,
SourceLocation LBrac,
SourceLocation RBrac,
AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXMemberDefaultArgs(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// CheckBaseSpecifier - Check the validity of a parsed base specifier and,
/// if valid, create a CXXBaseSpecifier for it (used by ActOnBaseSpecifier).
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
unsigned NumBases);
void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases,
unsigned NumBases);
bool IsDerivedFrom(QualType Derived, QualType Base);
bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
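/// For example, the following override is valid because 'D*' is covariant
/// with 'B*':
/// \code
/// struct B { virtual B *clone(); };
/// struct D : B { D *clone() override; };
/// \endcode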
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec of the overriding function is a subset of the base function's spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if the 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
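/// For example, the following override is ill-formed:
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f(); }; // error: overrides a 'final' function
/// \endcode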
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
AbstractDiagSelID SelID = AbstractNone);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
Decl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
Decl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
Decl **Params, unsigned NumParams,
SourceLocation RAngleLoc);
/// \brief The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
TemplateParameterList *TemplateParams,
AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc,
unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template);
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc,
SourceLocation ModulePrivateLoc,
TemplateIdAnnotation &TemplateId,
AttributeList *Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
const CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
AttributeList *Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// \brief The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// \brief The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// \brief The template argument was deduced from an array bound
/// via template argument deduction (see the example after this
/// enumeration).
CTAK_DeducedFromArrayBound
};
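/// For instance, in the call below 'T' is deduced directly from the
/// argument type (CTAK_Deduced), while 'N' is deduced from the array bound
/// (CTAK_DeducedFromArrayBound):
/// \code
/// template<typename T, unsigned N> void f(T (&arr)[N]);
/// int a[4];
/// void g() { f(a); } // T = int, N = 4
/// \endcode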
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \returns true if an error occurred, false otherwise.
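/// For example, checking the arguments of 'S<int>' below extends the
/// converted list with the defaulted argument for 'U':
/// \code
/// template<typename T, typename U = T*> struct S {};
/// S<int> s; // converted arguments are {int, int*}
/// \endcode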
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateArgumentLoc &Arg,
unsigned ArgumentPackIndex);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// \brief Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// \brief We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// \brief We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// \brief We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword.
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
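/// For example:
/// \code
/// template<typename T> struct S {
///   typename T::type member; // 'typename T::type' is handled here
/// };
/// \endcode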
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword.
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
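/// For example:
/// \code
/// template<typename MetaFun, typename T1, typename T2>
/// struct Apply {
///   typedef typename MetaFun::template apply<T1, T2>::type type;
/// };
/// \endcode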
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// \brief An arbitrary expression.
UPPC_Expression = 0,
/// \brief The base type of a class type.
UPPC_BaseType,
/// \brief The type of an arbitrary declaration.
UPPC_DeclarationType,
/// \brief The type of a data member.
UPPC_DataMemberType,
/// \brief The size of a bit-field.
UPPC_BitFieldWidth,
/// \brief The expression in a static assertion.
UPPC_StaticAssertExpression,
/// \brief The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// \brief The enumerator value.
UPPC_EnumeratorValue,
/// \brief A using declaration.
UPPC_UsingDeclaration,
/// \brief A friend declaration.
UPPC_FriendDeclaration,
/// \brief A declaration qualifier.
UPPC_DeclarationQualifier,
/// \brief An initializer.
UPPC_Initializer,
/// \brief A default argument.
UPPC_DefaultArgument,
/// \brief The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// \brief The type of an exception.
UPPC_ExceptionType,
/// \brief Partial specialization.
UPPC_PartialSpecialization,
/// \brief Microsoft __if_exists.
UPPC_IfExists,
/// \brief Microsoft __if_not_exists.
UPPC_IfNotExists,
/// \brief Lambda expression.
UPPC_Lambda,
/// \brief Block expression.
UPPC_Block
};
/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
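/// For example, the following declaration is diagnosed because 'Ts' is
/// never expanded:
/// \code
/// template<typename... Ts> void f(Ts x); // error: unexpanded pack 'Ts'
/// \endcode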
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(CXXScopeSpec &SS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
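/// For example, the template argument 'Ts...' below creates a pack
/// expansion:
/// \code
/// template<typename... Ts> struct Tuple {};
/// template<typename... Ts> struct Wrap { Tuple<Ts...> t; };
/// \endcode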
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
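/// For instance, instantiating 'Wrap<int, float>' below expands
/// 'Tuple<Ts>...' into two separate arguments:
/// \code
/// template<typename... Ts> struct Tuple {};
/// template<typename... Ts> using Wrap = Tuple<Tuple<Ts>...>;
/// Wrap<int, float> w; // Tuple<Tuple<int>, Tuple<float>>
/// \endcode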
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// \brief Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType);
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// \brief Template argument deduction was successful.
TDK_Success = 0,
/// \brief The declaration was invalid; do nothing.
TDK_Invalid,
/// \brief Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// \brief Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// \brief Template argument deduction produced inconsistent
/// deduced values for the given template parameter (see the example
/// after this enumeration).
TDK_Inconsistent,
/// \brief Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// \brief Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// \brief A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// \brief When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// \brief When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// \brief The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// \brief The arguments included an overloaded function name that could
/// not be resolved to a suitable function.
TDK_FailedOverloadResolution,
/// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure
};
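/// For instance, the call below fails with TDK_Inconsistent because 'T'
/// would have to be deduced as both 'int' and 'double':
/// \code
/// template<typename T> void f(T, T);
/// void g() { f(1, 2.0); } // deduction failure
/// \endcode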
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType,
unsigned ArgIdx,
QualType OriginalArgType)
: OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) { }
QualType OriginalParamType;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool PartialOverloading = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
/// \brief Substitute \p Replacement for \c auto in \p TypeWithAuto.
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Substitute \p Replacement for \c auto in \p TypeWithAuto.
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
QualType &Result);
DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer,
QualType &Result);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
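// Illustrative sketch (not part of the original header): querying which
// template parameters are deducible. NumParams is assumed to be the size of
// the template parameter list of FT, a FunctionTemplateDecl* in scope.
//
//   llvm::SmallBitVector Deduced(NumParams);
//   MarkDeducedTemplateParameters(FT, Deduced);
//   if (!Deduced[0])
//     /* the first parameter cannot be deduced from the signature */;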
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// \brief A template instantiation that is currently in progress.
struct ActiveTemplateInstantiation {
/// \brief The kind of template instantiation we are performing
enum InstantiationKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template, and
/// TemplateArgs/NumTemplateArguments provides the template
/// arguments as specified.
/// FIXME: Use a TemplateArgumentList
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a ClassTemplatePartialSpecializationDecl or
/// a FunctionTemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation
} Kind;
/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// \brief The entity that is being instantiated.
Decl *Entity;
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// \brief The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
ActiveTemplateInstantiation()
: Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
friend bool operator==(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
if (X.Kind != Y.Kind)
return false;
if (X.Entity != Y.Entity)
return false;
switch (X.Kind) {
case TemplateInstantiation:
case ExceptionSpecInstantiation:
return true;
case PriorTemplateArgumentSubstitution:
case DefaultTemplateArgumentChecking:
return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;
case DefaultTemplateArgumentInstantiation:
case ExplicitTemplateArgumentSubstitution:
case DeducedTemplateArgumentSubstitution:
case DefaultFunctionArgumentInstantiation:
return X.TemplateArgs == Y.TemplateArgs;
}
llvm_unreachable("Invalid InstantiationKind!");
}
friend bool operator!=(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
return !(X == Y);
}
};
/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
SmallVector<ActiveTemplateInstantiation, 16>
ActiveTemplateInstantiations;
/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;
/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// \brief Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;
/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
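// Illustrative sketch (not part of the original header): the comparison
// operators defined on ActiveTemplateInstantiation are what make backtrace
// suppression cheap; a diagnostic path can compare the top of the stack
// against the last context that was printed.
//
//   const ActiveTemplateInstantiation &Top = ActiveTemplateInstantiations.back();
//   if (Top != LastTemplateInstantiationErrorContext) {
//     PrintInstantiationStack();
//     LastTemplateInstantiationErrorContext = Top;
//   }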
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
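// Illustrative sketch (not part of the original header): expanding a pack
// element-by-element. NumExpansions is assumed to be the number of arguments
// in the pack; the RAII object restores the previous index on scope exit.
//
//   for (unsigned I = 0; I != NumExpansions; ++I) {
//     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);
//     // substitute the I-th element of each expanded pack here
//   }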
friend class ArgumentPackSubstitutionRAII;
/// \brief The stack of call expressions undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// \brief Note that we are instantiating a class template,
/// function template, or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// \brief Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// \brief Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
private:
Sema &SemaRef;
bool Invalid;
bool SavedInNonInstantiationSFINAEContext;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(),
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
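// Illustrative sketch (not part of the original header): the documented
// usage pattern is construct-then-check, so a depth overflow is reported
// exactly once at the point of instantiation.
//
//   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
//   if (Inst.isInvalid())
//     return true; // instantiation depth exceeded; error already emitted
//   // ... perform the instantiation; the destructor pops the stack entry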
void PrintInstantiationStack();
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE)
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
}
/// \brief Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
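// Illustrative sketch (not part of the original header): wrapping a
// substitution in a trap so hard errors become deduction failures.
//
//   SFINAETrap Trap(*this);
//   ExprResult Result = SubstExpr(E, TemplateArgs);
//   if (Trap.hasErrorOccurred() || Result.isInvalid())
//     return TDK_MiscellaneousDeductionFailure; // treat as SFINAE failure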
/// \brief RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
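// Illustrative sketch (not part of the original header): probing whether a
// construct would be valid without committing to diagnostics or typo fixes.
// SomeProvisionalCheck is a hypothetical analysis call.
//
//   bool Valid;
//   {
//     TentativeAnalysisScope Tentative(*this);
//     Valid = !SomeProvisionalCheck();
//   }
//   // typo correction and trapped diagnostics are restored here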
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// \brief Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
class SavePendingInstantiationsAndVTableUsesRAII {
public:
SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
~SavePendingInstantiationsAndVTableUsesRAII() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
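// Illustrative sketch (not part of the original header): isolating a batch
// of instantiations. The destructor's asserts require the locally-collected
// queues to be drained before scope exit.
//
//   {
//     SavePendingInstantiationsAndVTableUsesRAII Saved(*this, /*Enabled=*/true);
//     PerformPendingInstantiations(); // drain the locally-collected queue
//   } // outer PendingInstantiations and VTableUses are swapped back here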
/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class SavePendingLocalImplicitInstantiationsRAII {
public:
SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
~SavePendingLocalImplicitInstantiationsRAII() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc,
ParmVarDecl **Params, unsigned NumParams,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param NumExprs The number of expressions in \p Exprs.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
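// Illustrative sketch (not part of the original header): substituting into a
// call's argument list. Args/NumArgs are assumed to hold the unsubstituted
// arguments and TemplateArgs the current substitution.
//
//   SmallVector<Expr *, 8> ConvertedArgs;
//   if (SubstExprs(Args, NumArgs, /*IsCall=*/true, TemplateArgs, ConvertedArgs))
//     return ExprError(); // substitution failed; diagnostics already emitted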
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
SourceLocation PointOfInstantiation,
VarDecl *Var,
bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc,
IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
Decl * const *ProtoRefNames, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName,
SourceLocation CategoryLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
const IdentifierLocPair *IdentList,
unsigned NumElts,
AttributeList *attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
const IdentifierLocPair *ProtocolId,
unsigned NumProtocols,
SmallVectorImpl<Decl *> &Protocols);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
/// in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
ObjCContainerDecl *CD,
ObjCPropertyDecl *redeclaredProperty = nullptr,
ObjCContainerDecl *lexicalDC = nullptr);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
bool *OverridingProperty,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
AttributeList *ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType,
ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo,
DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// \brief The message is sent to 'super'.
ObjCSuperMessage,
/// \brief The message is an instance message.
ObjCInstanceMessage,
/// \brief The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
enum PragmaPackKind {
PPK_Default, // #pragma pack([n])
PPK_Show, // #pragma pack(show), only supported by MSVC.
PPK_Push, // #pragma pack(push, [identifier], [n])
PPK_Pop // #pragma pack(pop, [identifier], [n])
};
enum PragmaMSStructKind {
PMSST_OFF, // #pragma ms_struct off
PMSST_ON // #pragma ms_struct on
};
enum PragmaMSCommentKind {
PCK_Unknown,
PCK_Linker, // #pragma comment(linker, ...)
PCK_Lib, // #pragma comment(lib, ...)
PCK_Compiler, // #pragma comment(compiler, ...)
PCK_ExeStr, // #pragma comment(exestr, ...)
PCK_User // #pragma comment(user, ...)
};
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind,
IdentifierInfo *Name,
Expr *Alignment,
SourceLocation PragmaLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// ActOnPragmaPackMatrix - Called on well formed \#pragma pack_matrix(...).
void ActOnPragmaPackMatrix(bool bRowMajor, SourceLocation PragmaLoc);
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
// OpenMP directives and clauses.
private:
void *VarDataSharingAttributesStack;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
OpenMPClauseKind CKind);
public:
/// \brief Checks if the specified variable is used in one of the private
/// clauses in OpenMP constructs.
bool IsOpenMPCapturedVar(VarDecl *VD);
/// \brief Checks if the specified variable is used in a private clause in
/// OpenMP constructs.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
unsigned Argument, Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ArgumentLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind,
Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation KindLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'ordered' clause.
OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc);
/// \brief Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'reduction' clause.
OMPClause *
ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId);
/// \brief Called on well-formed 'linear' clause.
OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList,
Expr *Step,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
/// \brief An implicit conversion.
CCK_ImplicitConversion,
/// \brief A C-style cast.
CCK_CStyleCast,
/// \brief A functional-style cast.
CCK_FunctionalCast,
/// \brief A cast other than a C-style cast.
CCK_OtherCast
};
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for the various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// that point to integer types with different signedness, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind);
// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
bool Diagnose = true,
bool DiagnoseCFAudited = false);
// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc,
bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool *NonStandardCompositeType = nullptr);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool *NonStandardCompositeType = nullptr) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
NonStandardCompositeType);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible_With_Added_Qualification - The two types are
/// reference-compatible with added qualification, meaning that
/// they are reference-compatible and the qualifiers on T1 (cv1)
/// are greater than the qualifiers on T2 (cv2).
Ref_Compatible_With_Added_Qualification,
/// Ref_Compatible - The two types are reference-compatible and
/// have equivalent qualifiers (cv1 == cv2).
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);
ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
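/// A minimal usage sketch (the diagnoser subclass and names here are
/// hypothetical, for illustration only):
///   struct NotICEDiagnoser : VerifyICEDiagnoser {
///     void diagnoseNotICE(Sema &S, SourceLocation Loc,
///                         SourceRange SR) override {
///       // emit a custom "not an integer constant expression" diagnostic
///     }
///   } Diagnoser;
///   llvm::APSInt Value;
///   ExprResult Converted =
///       VerifyIntegerConstantExpression(E, &Value, Diagnoser);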
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);
bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// \brief Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// \brief Code completion occurs within a class, struct, or union.
PCC_Class,
/// \brief Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// \brief Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// \brief Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// \brief Code completion occurs following one or more template
/// headers.
PCC_Template,
/// \brief Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// \brief Code completion occurs within an expression.
PCC_Expression,
/// \brief Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// \brief Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
PCC_Type,
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool IsArrow);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
unsigned NumProtocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
// HLSL Change Starts - checking array subscript access to vector or matrix member
void CheckHLSLArrayAccess(const Expr *expr);
// HLSL Change ends
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
ArrayRef<const Expr *> Args, bool IsMemberFunction,
SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinCpuSupports(CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
ArrayRef<const Expr *> Args, bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg,
FormatStringType Type, bool inFunctionCall,
VariadicCallType CallType,
llvm::SmallBitVector &CheckedVarArgs);
bool FormatStringHasSArg(const StringLiteral *FExpr);
bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl,
IdentifierInfo *FnInfo);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// \brief Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC's.
void CheckBreakContinueBinding(Expr *E);
/// \brief Check whether the receiver is a mutable ObjC container that
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const Expr * const *ExprArgs);
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
// HLSL Change Starts
bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, Expr *BitWidth, TypeSourceInfo* TInfo, bool isParameter);
bool DiagnoseHLSLLookup(const LookupResult &R);
void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl);
// HLSL Change Ends
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
AvailabilityResult getCurContextAvailability() const;
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// \brief To be used for checking whether the number of arguments being
/// passed to a function exceeds the number of parameters the function expects.
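/// Illustrative (hypothetical) example: completing "f(a, <cursor>)" against
/// a two-parameter function reports NumArgs == 1 with PartialOverloading set,
/// so the call is still viewed as viable because 1 + 1 > 2 is false.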
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
};
/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
public:
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext,
Sema::ReuseLambdaContextDecl,
IsDecltype);
}
~EnterExpressionEvaluationContext() {
Actions.PopExpressionEvaluationContext();
}
};
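/// A minimal usage sketch (assuming Sema::Unevaluated is the desired
/// evaluation context and SemaRef names the current Sema instance):
///   {
///     EnterExpressionEvaluationContext Unevaluated(SemaRef,
///                                                  Sema::Unevaluated);
///     // ... build expressions that must not be odr-used ...
///   } // the context is popped when the RAII object is destroyed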
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// \brief The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
#endif
|
ecryptfs_fmt_plug.c | /* Cracker for eCryptfs ~/.ecryptfs/wrapped-passphrase.
*
* We attack "login passphrase" instead of "mount passphrase" (and which could
* be 128-bit random key!).
*
* "ecryptfs_unwrap_passphrase -> generate_passphrase_sig" in
* src/libecryptfs/key_management.c is important.
*
* Do we need to do full decryption as done in "ecryptfs_unwrap_passphrase"?
* I believe 8 bytes of verification data ought to be enough for anybody!
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_ecryptfs1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_ecryptfs1);
#else
#include <string.h>
#include <errno.h>
#include "sha2.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "base64_convert.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 8 // XXX
#endif
#endif
#include "memdbg.h"
//#undef SIMD_COEF_64
#define FORMAT_TAG "$ecryptfs$"
#define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define FORMAT_LABEL "eCryptfs"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#define BENCHMARK_COMMENT " (65536x)" // good luck with that!
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define REAL_BINARY_SIZE 8
#define HEX_BINARY_SIZE (REAL_BINARY_SIZE*2)
#define BINARY_SIZE 64
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define GETPOS_512(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64 *8 )
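/*
 * GETPOS_512 maps byte i of lane `index` into the interleaved SIMD buffer:
 * (i & ~7) * SIMD_COEF_64 selects the 64-bit word slot shared by the lane
 * group, (index % SIMD_COEF_64) * 8 picks the lane within that slot,
 * (7 - (i & 7)) byte-swaps within the word for SHA-512's big-endian word
 * order, and the final term skips ahead to the lane's buffer group.
 */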
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* taken from eCryptfs */
#define ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS 65536
#define ECRYPTFS_MAX_PASSWORD_LENGTH 64
#define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH
#define ECRYPTFS_SALT_SIZE 8
#define ECRYPTFS_SALT_SIZE_HEX (ECRYPTFS_SALT_SIZE*2)
#define ECRYPTFS_DEFAULT_SALT "\x00\x11\x22\x33\x44\x55\x66\x77"
#define ECRYPTFS_DEFAULT_SALT_HEX "0011223344556677"
#define ECRYPTFS_DEFAULT_SALT_FNEK_HEX "9988776655443322"
#define ECRYPTFS_SIG_SIZE 8
#define ECRYPTFS_SIG_SIZE_HEX (ECRYPTFS_SIG_SIZE*2)
#define ECRYPTFS_PASSWORD_SIG_SIZE ECRYPTFS_SIG_SIZE_HEX
#define ECRYPTFS_MAX_KEY_BYTES 64
#define ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES 512
#define ECRYPTFS_DEFAULT_IV_BYTES 16
static struct fmt_tests ecryptfs_tests[] = {
/* hash ==> first 16 bytes of ~/.ecryptfs/wrapped-passphrase */
{"$ecryptfs$0$92dc3db8feaf1676", "openwall"},
{"$ecryptfs$0$ccb515ee115be591", "failpassword"},
{"$ecryptfs$0$8acb10b9e061fcc7", "verylongbutstillfailpassword"},
/* fake hash to test custom salt handling */
{"$ecryptfs$0$1$0000000000000000$884ed410cd143bca", "fake"},
{"$ecryptfs$0$1$544c39674737716a$a8307a01b2d1b008", "fake"},
{NULL}
};
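/*
 * Accepted input line formats (see valid() below):
 *   $ecryptfs$0$<16 hex chars>                        default salt
 *   $ecryptfs$0$1$<16 hex char salt>$<16 hex chars>   custom salt
 * The final 16 hex chars encode the 8 verification bytes (REAL_BINARY_SIZE)
 * that get_binary() extracts and crypt_all() checks against.
 */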
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
int iterations; // really really unused (even in the original code)
int salt_length;
char unsigned salt[ECRYPTFS_SALT_SIZE + 1];
} *cur_salt;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_align(sizeof(*saved_key),
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_align(sizeof(*crypt_out),
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH) != 0)
return 0;
p = ciphertext + FORMAT_TAG_LENGTH;
if (*p != '0' || *(p + 1) != '$')
return 0;
p += 2;
if (*p == '1' && *(p + 1) == '$') {
// handle salted variety
p += 2;
if ( abs(hexlenl(p)) != HEX_BINARY_SIZE || p[HEX_BINARY_SIZE] != '$')
return 0;
p += (HEX_BINARY_SIZE+1);
}
return hexlenl(p) == HEX_BINARY_SIZE && !p[HEX_BINARY_SIZE];
}
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
int i;
char *p, *q;
memset(&cs, 0, SALT_SIZE);
p = ciphertext + FORMAT_TAG_LENGTH;
p = p + 2; // skip over "0$"
/* support for custom salt */
if (*p == '1' && *(p + 1) == '$') {
p = p + 2;
q = strchr(p, '$');
cs.salt_length = (q - p) / 2;
for (i = 0; i < cs.salt_length; i++)
cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
atoi16[ARCH_INDEX(p[2 * i + 1])];
} else {
memcpy(cs.salt, ECRYPTFS_DEFAULT_SALT, ECRYPTFS_SALT_SIZE);
}
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[REAL_BINARY_SIZE];
ARCH_WORD_32 dummy;
} buf;
unsigned char *out = buf.c;
int i;
char *p = strrchr(ciphertext, '$') + 1;
for (i = 0; i < REAL_BINARY_SIZE; i++) {
out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
int j;
SHA512_CTX ctx;
#ifdef SIMD_COEF_64
unsigned char tmpBuf[64];
unsigned int i;
unsigned char _IBuf[128*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys;
ARCH_WORD_64 *keys64;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
keys64 = (ARCH_WORD_64*)keys;
memset(keys, 0, 128*MAX_KEYS_PER_CRYPT);
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
SHA512_Init(&ctx);
SHA512_Update(&ctx, cur_salt->salt, ECRYPTFS_SALT_SIZE);
SHA512_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i]));
SHA512_Final((unsigned char *)tmpBuf, &ctx);
for (j = 0; j < 64; ++j)
keys[GETPOS_512(j, i)] = tmpBuf[j];
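// after the loop j == 64, so this appends SHA-512's 0x80 padding byte
// directly behind the 64-byte digest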
keys[GETPOS_512(j, i)] = 0x80;
// 64 bytes of crypt data (0x200 bits).
keys[GETPOS_512(126, i)] = 0x02;
}
for (j = 1; j < ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS; j++)
SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
// Last one with FLAT_OUT
SIMDSHA512body(keys, (ARCH_WORD_64*)crypt_out[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT);
#else
SHA512_Init(&ctx);
SHA512_Update(&ctx, cur_salt->salt, ECRYPTFS_SALT_SIZE);
SHA512_Update(&ctx, saved_key[index], strlen(saved_key[index]));
SHA512_Final((unsigned char *)crypt_out[index], &ctx);
/* now "h" (crypt_out[index] becomes our input, total SHA-512 calls => 65536 */
for (j = 1; j <= ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS; j++) {
SHA512_CTX ctx;
SHA512_Init(&ctx);
SHA512_Update(&ctx, (unsigned char*)crypt_out[index], BINARY_SIZE);
SHA512_Final((unsigned char *)crypt_out[index], &ctx);
}
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], REAL_BINARY_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], REAL_BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void ecryptfs_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_ecryptfs1 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
REAL_BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
ecryptfs_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
ecryptfs_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
aix_smd5_fmt_plug.c | /* AIX smd5 cracker patch for JtR. Hacked together during April of 2013 by Dhiru
* Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
* it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_smd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_smd5);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 16 // tuned on i7 w/HT
#endif
#include "md5.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "aix-smd5"
#define FORMAT_NAME "AIX LPA {smd5} (modified crypt-md5)"
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests smd5_tests[] = {
/* following hashes are AIX non-standard smd5 hashes */
{"{smd5}s8/xSJ/v$uGam4GB8hOjTLQqvBfxJ2/", "password"},
{"{smd5}alRJaSLb$aKM3H1.h1ycXl5GEVDH1e1", "aixsucks?"},
{"{smd5}eLB0QWeS$Eg.YfWY8clZuCxF0xNrKg.", "0123456789ABCDE"},
/* following hashes are AIX standard smd5 hashes (with corrected tag)
* lpa_options = std_hash=true */
{"$1$JVDbGx8K$T9h8HK4LZxeLPMTAxCfpc1", "password"},
{"$1$1Cu6fEvv$42kuaJ5fMEqyVStPuFG040", "0123456789ABCDE"},
{"$1$ql5x.xXL$vYVDhExol2xUBBpERRWcn1", "jtr>hashcat"},
{NULL}
};
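/*
 * Accepted input line formats (see valid() below; the salt is 8 characters):
 *   {smd5}<salt>$<hash>   AIX non-standard variant
 *   $1$<salt>$<hash>      standard crypt-md5 (lpa_options = std_hash=true)
 * where <hash> is crypt-base64 (22 characters in the samples above).
 */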
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
int is_standard;
unsigned char salt[16];
} *cur_salt;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_tiny(sizeof(*saved_key) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p;
char *ctcopy;
char *keeptr;
if (strncmp(ciphertext, "{smd5}", 6) != 0 &&
strncmp(ciphertext, "$1$", 3))
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
if (!strncmp(ciphertext, "{smd5}", 6))
ctcopy += 6;
else
ctcopy += 3;
if ((p = strtok(ctcopy, "$")) == NULL) /* salt */
goto err;
if (strlen(p) != 8)
goto err;
if ((p = strtok(NULL, "$")) == NULL) /* hash */
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
keeptr = ctcopy;
if (!strncmp(ciphertext, "{smd5}", 6)) {
ctcopy += 6;
cs.is_standard = 0;
}
else {
ctcopy += 3;
cs.is_standard = 1;
}
p = strtok(ctcopy, "$");
strncpy((char*)cs.salt, p, 9);
p = strtok(NULL, "$");
MEM_FREE(keeptr);
return (void *)&cs;
}
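/* TO_BINARY decodes four crypt-base64 characters (via atoi64) into one
 * 24-bit value and scatters its three bytes to output positions (b1, b2, b3),
 * undoing the permutation crypt-md5 applies when encoding the digest. */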
#define TO_BINARY(b1, b2, b3) \
value = \
(ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] | \
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] << 6) | \
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[2])] << 12) | \
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[3])] << 18); \
pos += 4; \
out.b[b1] = value >> 16; \
out.b[b2] = value >> 8; \
out.b[b3] = value;
static void* get_binary(char *ciphertext)
{
static union {
char b[16];
ARCH_WORD w;
} out;
char *pos;
ARCH_WORD_32 value;
pos = ciphertext + 3;
if (!strncmp(ciphertext, "{smd5}", 6))
pos = ciphertext + 6;
while (*pos++ != '$');
TO_BINARY(0, 6, 12);
TO_BINARY(1, 7, 13);
TO_BINARY(2, 8, 14);
TO_BINARY(3, 9, 15);
TO_BINARY(4, 10, 5);
out.b[11] =
(ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] |
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] << 6);
return out.b;
}
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/*
* $Id: md5_crypt.c,v 1.1 2002-05-11 14:42:35 cpbotha Exp $
*
* ----------------------------------------------------------------------------
* "THE BEER-WARE LICENSE" (Revision 42):
* <phk@login.dknet.dk> wrote this file. As long as you retain this notice you
* can do whatever you want with this stuff. If we meet some day, and you think
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
* Origin: Id: crypt.c,v 1.3 1995/05/30 05:42:22 rgrimes Exp
*
*/
static void crypt_md5(char *pw, char *salt, int is_standard, char *passwd)
{
char *magic = "$1$";
	/* This string is magic for this algorithm. Having
	 * it this way, we can get better later on */
char *sp, *ep;
unsigned char final[16];
int sl, pl, i, j;
MD5_CTX ctx, ctx1;
/* Refine the Salt first */
sp = salt;
/* If it starts with the magic string, then skip that */
if (!strncmp(sp, magic, strlen(magic)))
sp += strlen(magic);
/* It stops at the first '$', max 8 chars */
for (ep = sp; *ep && *ep != '$' && ep < (sp + 8); ep++)
continue;
/* get the length of the true salt */
sl = ep - sp;
MD5_Init(&ctx);
/* The password first, since that is what is most unknown */
MD5_Update(&ctx,(unsigned char *)pw,strlen(pw));
// The following license text applies to the "if" code block
// License: belongs to the PUBLIC DOMAIN, donated to hashcat, credits MUST go to atom
// (hashcat) and philsmd for their hard work. Thx
// Disclaimer: WE PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER
// EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// Furthermore, NO GUARANTEES THAT IT WORKS FOR YOU AND WORKS CORRECTLY
if (is_standard) {
/* Then our magic string */
MD5_Update(&ctx,(unsigned char *)magic,strlen(magic));
/* Then the raw salt */
MD5_Update(&ctx,(unsigned char *)sp,sl);
}
else {
MD5_Update(&ctx,(unsigned char *)sp,sl);
}
/* Then just as many characters of the MD5_(pw,salt,pw) */
MD5_Init(&ctx1);
MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
MD5_Update(&ctx1,(unsigned char *)sp,sl);
MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
MD5_Final(final,&ctx1);
for (pl = strlen(pw); pl > 0; pl -= 16)
MD5_Update(&ctx,(unsigned char *)final,pl>16 ? 16 : pl);
memset(final, 0, sizeof final);
/* Then something really weird... */
for (j = 0, i = strlen(pw); i; i >>= 1)
if (i & 1)
MD5_Update(&ctx, (unsigned char *)final+j, 1);
else
MD5_Update(&ctx, (unsigned char *)pw+j, 1);
/* Now make the output string */
strcpy(passwd, magic);
strncat(passwd, sp, sl);
strcat(passwd, "$");
MD5_Final(final,&ctx);
/*
* and now, just to make sure things don't run too fast
	 * On a 60 MHz Pentium this takes 34 msec, so you would
* need 30 seconds to build a 1000 entry dictionary...
*/
for (i = 0; i < 1000; i++) {
MD5_Init(&ctx1);
if (i & 1)
MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
else
MD5_Update(&ctx1,(unsigned char *)final,16);
if (i % 3)
MD5_Update(&ctx1,(unsigned char *)sp,sl);
if (i % 7)
MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
if (i & 1)
MD5_Update(&ctx1,(unsigned char *)final,16);
else
MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
MD5_Final(final,&ctx1);
}
memcpy(passwd, final, 16);
}
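/*
 * Illustrative usage sketch (not part of the original format code):
 * crypt_md5() overwrites `passwd` with the 16 raw digest bytes, so the
 * output buffer must hold at least 16 bytes. For the first $1$ test
 * vector above:
 *
 *     char out[16];
 *     crypt_md5("password", "JVDbGx8K", 1, out);  // 1 = standard $1$ variant
 *     // out now holds the binary digest encoded in
 *     // "$1$JVDbGx8K$T9h8HK4LZxeLPMTAxCfpc1"
 */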
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
crypt_md5(saved_key[index], (char*)cur_salt->salt, cur_salt->is_standard, (char *)crypt_out[index]);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void smd5_set_key(char *key, int index)
{
int saved_key_length = strlen(key);
if (saved_key_length > PLAINTEXT_LENGTH)
saved_key_length = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_key_length);
saved_key[index][saved_key_length] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_smd5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
smd5_tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
set_salt,
smd5_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
omp_sections_reduction.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int test_omp_sections_reduction()
{
int sum;
int known_sum;
double dpt,dsum;
double dknown_sum;
  double dt=0.5; /* base of geometric series for + and - test */
double rounding_error= 1.E-9;
int diff;
double ddiff;
int product;
int known_product;
int logic_and;
int bit_and;
int logic_or;
int bit_or;
int exclusiv_bit_or;
int logics[1000];
int i;
int result;
/* int my_islarger; */
/*int is_larger=1;*/
sum =7;
dpt =1;
dsum=0;
product =1;
logic_and=1;
bit_and=1;
logic_or=0;
bit_or=0;
exclusiv_bit_or=0;
result = 0;
dt = 1./3.;
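  /* Gauss sum: 1 + 2 + ... + 999 = 999*1000/2, plus the initial value 7
     that sum was seeded with above */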
known_sum = (999*1000)/2+7;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(+:sum)
{
#pragma omp section
{
for (i=1;i<300;i++) {
sum=sum+i;
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
sum=sum+i;
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
sum=sum+i;
}
}
}
}
if(known_sum!=sum) {
++result;
fprintf(stderr,"Error in sum with integers: Result was %d"
" instead of %d\n", sum,known_sum);
}
diff = (999*1000)/2;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(-:diff)
{
#pragma omp section
{
for (i=1;i<300;i++) {
diff=diff-i;
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
diff=diff-i;
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
diff=diff-i;
}
}
}
}
if(diff != 0) {
result++;
fprintf(stderr,"Error in Difference with integers: Result was %d"
" instead of 0.\n",diff);
}
for (i=0;i<20;++i) {
dpt*=dt;
}
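  /* dpt now holds dt^20, so the geometric series sum_{i=0}^{19} dt^i
     below has the closed form (1 - dt^20) / (1 - dt) */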
dknown_sum = (1-dpt)/(1-dt);
#pragma omp parallel
{
#pragma omp sections private(i) reduction(+:dsum)
{
#pragma omp section
{
for (i=0;i<6;++i) {
dsum += pow(dt,i);
}
}
#pragma omp section
{
for (i=6;i<12;++i) {
dsum += pow(dt,i);
}
}
#pragma omp section
{
for (i=12;i<20;++i) {
dsum += pow(dt,i);
}
}
}
}
if( fabs(dsum-dknown_sum) > rounding_error ) {
result++;
fprintf(stderr,"Error in sum with doubles: Result was %f"
" instead of %f (Difference: %E)\n",
dsum, dknown_sum, dsum-dknown_sum);
}
dpt=1;
for (i=0;i<20;++i) {
dpt*=dt;
}
fprintf(stderr,"\n");
ddiff = (1-dpt)/(1-dt);
#pragma omp parallel
{
#pragma omp sections private(i) reduction(-:ddiff)
{
#pragma omp section
{
for (i=0;i<6;++i) {
ddiff -= pow(dt,i);
}
}
#pragma omp section
{
for (i=6;i<12;++i) {
ddiff -= pow(dt,i);
}
}
#pragma omp section
{
for (i=12;i<20;++i) {
ddiff -= pow(dt,i);
}
}
}
}
if(fabs(ddiff) > rounding_error) {
result++;
fprintf(stderr,"Error in Difference with doubles: Result was %E"
" instead of 0.0\n",ddiff);
}
known_product = 3628800;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(*:product)
{
#pragma omp section
{
for(i=1;i<3;i++) {
product *= i;
}
}
#pragma omp section
{
for(i=3;i<7;i++) {
product *= i;
}
}
#pragma omp section
{
for(i=7;i<11;i++) {
product *= i;
}
}
}
}
if(known_product != product) {
result++;
fprintf(stderr,"Error in Product with integers: Result was %d"
" instead of %d\n",product,known_product);
}
for(i=0;i<1000;i++) {
logics[i]=1;
}
#pragma omp parallel
{
#pragma omp sections private(i) reduction(&&:logic_and)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_and = (logic_and && logics[i]);
}
}
}
}
if(!logic_and) {
result++;
fprintf(stderr,"Error in logic AND part 1\n");
}
logic_and = 1;
logics[501] = 0;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(&&:logic_and)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_and = (logic_and && logics[i]);
}
}
}
}
if(logic_and) {
result++;
fprintf(stderr,"Error in logic AND part 2\n");
}
for(i=0;i<1000;i++) {
logics[i]=0;
}
#pragma omp parallel
{
#pragma omp sections private(i) reduction(||:logic_or)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_or = (logic_or || logics[i]);
}
}
}
}
if(logic_or) {
result++;
fprintf(stderr,"\nError in logic OR part 1\n");
}
logic_or = 0;
logics[501]=1;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(||:logic_or)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_or = (logic_or || logics[i]);
}
}
}
}
if(!logic_or) {
result++;
fprintf(stderr,"Error in logic OR part 2\n");
}
for(i=0;i<1000;++i) {
logics[i]=1;
}
#pragma omp parallel
{
#pragma omp sections private(i) reduction(&:bit_and)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_and = (bit_and & logics[i]);
}
}
}
}
if(!bit_and) {
result++;
fprintf(stderr,"Error in BIT AND part 1\n");
}
bit_and = 1;
logics[501]=0;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(&:bit_and)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_and = bit_and & logics[i];
}
}
}
}
if(bit_and) {
result++;
fprintf(stderr,"Error in BIT AND part 2\n");
}
for(i=0;i<1000;i++) {
logics[i]=0;
}
#pragma omp parallel
{
#pragma omp sections private(i) reduction(|:bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_or = bit_or | logics[i];
}
}
}
}
if(bit_or) {
result++;
fprintf(stderr,"Error in BIT OR part 1\n");
}
bit_or = 0;
logics[501]=1;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(|:bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_or = bit_or | logics[i];
}
}
}
}
if(!bit_or) {
result++;
fprintf(stderr,"Error in BIT OR part 2\n");
}
for(i=0;i<1000;i++) {
logics[i]=0;
}
#pragma omp parallel
{
#pragma omp sections private(i) reduction(^:exclusiv_bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
}
}
if(exclusiv_bit_or) {
result++;
fprintf(stderr,"Error in EXCLUSIV BIT OR part 1\n");
}
exclusiv_bit_or = 0;
logics[501]=1;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(^:exclusiv_bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
}
}
if(!exclusiv_bit_or) {
result++;
fprintf(stderr,"Error in EXCLUSIV BIT OR part 2\n");
}
/*printf("\nResult:%d\n",result);*/
return (result==0);
}
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_sections_reduction()) {
num_failed++;
}
}
return num_failed;
}
|
psrs_sort.c | #include <omp.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
/* headers */
int fcompare(const void *ptr2num1, const void *ptr2num2);
float *merge(float *left, float *right, int l_end, int r_end);
float *merge_sort(float *arr, int size);
void insertion_sort(float *arr, int n);
void calc_partition_borders(float array[],
int start,
int end,
int sublist_sizes[],
int at,
float pivots[],
int first_p,
int last_p);
void psrs_sort(float *a, int n);
void sortf(float *a, int len);
/* sort an array in non-descending order */
void psrs_sort(float *a, int n)
{
if (n > 1)
{
if (n <= 55)
{
// Testing shows that sequential insertion sort is quickest when n <= 55 (approx.)
insertion_sort(a, n);
}
else if (n <= 10000)
{
            // Testing shows that sequential merge sort is quickest when n <= 10000 (approx.)
merge_sort(a, n);
}
else
{
// Testing shows that our algorithm is now the quickest
int p, size, rsize, sample_size;
float *sample, *pivots;
int *partition_borders, *bucket_sizes, *result_positions;
float **loc_a_ptrs;
            // Determine the appropriate number of threads to use:
            // PSRS requires p^3 <= n to hold
p = omp_get_max_threads();
p = p * p * p;
if (p > n)
{
p = floor(pow(n, 0.33));
p -= p % 2;
}
else
{
p = omp_get_max_threads();
p -= p % 2;
}
omp_set_num_threads(p);
printf("psrs_sort #threads: %d\n", p);
size = (n + p - 1) / p;
rsize = (size + p - 1) / p;
sample_size = p * (p - 1);
loc_a_ptrs = (float **)malloc(p * sizeof(float *));
sample = (float *)malloc(sample_size * sizeof(float));
partition_borders = (int *)malloc(p * (p + 1) * sizeof(int));
bucket_sizes = (int *)malloc(p * sizeof(int));
result_positions = (int *)malloc(p * sizeof(int));
pivots = (float *)malloc((p - 1) * sizeof(float));
#pragma omp parallel
{
int i, j, max, thread_num, start, end, loc_size, offset, this_result_size;
float *loc_a, *this_result, *current_a;
thread_num = omp_get_thread_num();
start = thread_num * size;
end = start + size - 1;
if (end >= n)
end = n - 1;
loc_size = (end - start + 1);
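                // reduce the global end index to the last index within
                // this thread's local chunk, i.e. loc_size - 1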
end = end % size;
loc_a = (float *)malloc(loc_size * sizeof(float));
memcpy(loc_a, a + start, loc_size * sizeof(float));
loc_a_ptrs[thread_num] = loc_a;
sortf(loc_a, loc_size); // Testing shows that this sequential sort is quickest in this instance
offset = thread_num * (p - 1) - 1;
for (i = 1; i < p; i++)
{
if (i * rsize <= end)
{
sample[offset + i] = loc_a[i * rsize - 1];
}
else
{
sample[offset + i] = loc_a[end];
}
}
#pragma omp barrier
#pragma omp single
{
merge_sort(sample, sample_size); // Testing shows that this sequential sort is quickest in this instance
for (i = 0; i < p - 1; i++)
{
pivots[i] = sample[i * p + p / 2];
}
}
#pragma omp barrier
offset = thread_num * (p + 1);
partition_borders[offset] = 0;
partition_borders[offset + p] = end + 1;
calc_partition_borders(loc_a, 0, loc_size - 1, partition_borders, offset, pivots, 1, p - 1);
#pragma omp barrier
max = p * (p + 1);
bucket_sizes[thread_num] = 0;
for (i = thread_num; i < max; i += p + 1)
{
bucket_sizes[thread_num] += partition_borders[i + 1] - partition_borders[i];
}
#pragma omp barrier
#pragma omp single
{
result_positions[0] = 0;
for (i = 1; i < p; i++)
{
result_positions[i] = bucket_sizes[i - 1] + result_positions[i - 1];
}
}
#pragma omp barrier
this_result = a + result_positions[thread_num];
if (thread_num == p - 1)
{
this_result_size = n - result_positions[thread_num];
}
else
{
this_result_size = result_positions[thread_num + 1] - result_positions[thread_num];
}
                // pluck this thread's sublist from each of the local arrays
for (i = 0, j = 0; i < p; i++)
{
int low, high, partition_size;
offset = i * (p + 1) + thread_num;
low = partition_borders[offset];
high = partition_borders[offset + 1];
partition_size = (high - low);
if (partition_size > 0)
{
memcpy(this_result + j, &(loc_a_ptrs[i][low]), partition_size * sizeof(float));
j += partition_size;
}
}
                // sort this thread's result, the concatenation of p sorted partitions
sortf(this_result, this_result_size); // Testing shows that this sequential sort is quickest in this instance
#pragma omp barrier
free(loc_a);
}
free(loc_a_ptrs);
free(sample);
free(partition_borders);
free(bucket_sizes);
free(result_positions);
free(pivots);
}
}
}
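/*
 * Minimal driver sketch (an assumption, not part of the original file):
 * compile with something like `gcc -fopenmp psrs_sort.c -lm` and call
 * psrs_sort() on a random array:
 *
 *     #include <assert.h>
 *     int main(void)
 *     {
 *         int n = 1 << 20, i;
 *         float *a = malloc(n * sizeof(float));
 *         for (i = 0; i < n; i++)
 *             a[i] = (float)rand() / RAND_MAX;
 *         psrs_sort(a, n);
 *         for (i = 1; i < n; i++)
 *             assert(a[i - 1] <= a[i]);
 *         free(a);
 *         return 0;
 *     }
 */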
/* determine the boundaries for the sublists of a local array */
void calc_partition_borders(float array[], // array being sorted
int start,
int end, // separate the array into current process range
int result[],
int at, // this process start point in result
float pivots[], // the pivot values
int first_pv, // first pivot
int last_pv) // last pivot
{
int mid, lowerbound, upperbound, center;
float pv;
mid = (first_pv + last_pv) / 2;
pv = pivots[mid - 1];
lowerbound = start;
upperbound = end;
while (lowerbound <= upperbound)
{
center = (lowerbound + upperbound) / 2;
if (array[center] > pv)
{
upperbound = center - 1;
}
else
{
lowerbound = center + 1;
}
}
result[at + mid] = lowerbound;
if (first_pv < mid)
{
calc_partition_borders(array, start, lowerbound - 1, result, at, pivots, first_pv, mid - 1);
}
if (mid < last_pv)
{
calc_partition_borders(array, lowerbound, end, result, at, pivots, mid + 1, last_pv);
}
}
/* Compare floats */
int fcompare(const void *ptr2num1, const void *ptr2num2)
{
float num1 = *((float *)ptr2num1);
float num2 = *((float *)ptr2num2);
if (num1 > num2)
return 1;
else if (num1 < num2)
return -1;
else
return 0;
}
/*
Sort a portion of an array
@todo: see if merge sort might be better in some cases
*/
void sortf(float *a, int len)
{
qsort(a, len, sizeof(float), fcompare);
}
/*
Standard merge sort
*/
float *merge_sort(float *arr, int size)
{
    // Arrays of size 1 or less are already sorted
if (size > 1)
{
int middle = size / 2, i;
float *left, *right;
left = arr;
right = arr + middle;
left = merge_sort(left, middle);
right = merge_sort(right, size - middle);
return merge(left, right, middle, size - middle);
}
else
{
return arr;
}
}
float *merge(float *left, float *right, int l_end, int r_end)
{
int temp_off, l_off, r_off, size = l_end + r_end;
float *temp = (float *)malloc(sizeof(float) * l_end);
// Copy lower half into temp buffer
for (l_off = 0, temp_off = 0; left + l_off != right; l_off++, temp_off++)
{
*(temp + temp_off) = *(left + l_off);
}
temp_off = 0;
l_off = 0;
r_off = 0;
while (l_off < size)
{
if (temp_off < l_end)
{
if (r_off < r_end)
{
if (*(temp + temp_off) < *(right + r_off))
{
*(left + l_off) = *(temp + temp_off);
temp_off++;
}
else
{
*(left + l_off) = *(right + r_off);
r_off++;
}
}
else
{
*(left + l_off) = *(temp + temp_off);
temp_off++;
}
}
else
{
if (r_off < r_end)
{
*(left + l_off) = *(right + r_off);
r_off++;
}
else
{
printf("\nERROR - merging loop going too far\n");
}
}
l_off++;
}
free(temp);
return left;
}
/*
Standard insertion sort
*/
void insertion_sort(float *arr, int n)
{
    int i, j, k;
    float temp; // must be float: an int temp would truncate the values being sorted
    for (i = 1; i < n; i++) // i < n: using i <= n would read arr[n] out of bounds
{
for (j = 0; j < i; j++)
{
if (arr[j] > arr[i])
{
temp = arr[j];
arr[j] = arr[i];
for (k = i; k > j; k--)
arr[k] = arr[k - 1];
arr[k + 1] = temp;
}
}
}
}
|
hopscotch_hash.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_utilities.h"
static HYPRE_Int NearestPowerOfTwo( HYPRE_Int value )
{
HYPRE_Int rc = 1;
while (rc < value)
{
rc <<= 1;
}
return rc;
}
static void InitBucket(hypre_HopscotchBucket *b)
{
b->hopInfo = 0;
b->hash = HYPRE_HOPSCOTCH_HASH_EMPTY;
}
static void InitBigBucket(hypre_BigHopscotchBucket *b)
{
b->hopInfo = 0;
b->hash = HYPRE_HOPSCOTCH_HASH_EMPTY;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
static void InitSegment(hypre_HopscotchSegment *s)
{
s->timestamp = 0;
omp_init_lock(&s->lock);
}
static void DestroySegment(hypre_HopscotchSegment *s)
{
omp_destroy_lock(&s->lock);
}
#endif
void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel)
{
s->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1;
if (inCapacity < s->segmentMask + 1)
{
inCapacity = s->segmentMask + 1;
}
//ADJUST INPUT ............................
HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity + 4096);
HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1;
s->bucketMask = adjInitCap - 1;
HYPRE_Int i;
//ALLOCATE THE SEGMENTS ...................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
s->segments = hypre_TAlloc(hypre_HopscotchSegment, s->segmentMask + 1, HYPRE_MEMORY_HOST);
for (i = 0; i <= s->segmentMask; ++i)
{
InitSegment(&s->segments[i]);
}
#endif
s->hopInfo = hypre_TAlloc(hypre_uint, num_buckets, HYPRE_MEMORY_HOST);
s->key = hypre_TAlloc(HYPRE_Int, num_buckets, HYPRE_MEMORY_HOST);
s->hash = hypre_TAlloc(HYPRE_Int, num_buckets, HYPRE_MEMORY_HOST);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for
#endif
for (i = 0; i < num_buckets; ++i)
{
s->hopInfo[i] = 0;
s->hash[i] = HYPRE_HOPSCOTCH_HASH_EMPTY;
}
}
void hypre_UnorderedBigIntSetCreate( hypre_UnorderedBigIntSet *s,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel)
{
s->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1;
if (inCapacity < s->segmentMask + 1)
{
inCapacity = s->segmentMask + 1;
}
//ADJUST INPUT ............................
HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity + 4096);
HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1;
s->bucketMask = adjInitCap - 1;
HYPRE_Int i;
//ALLOCATE THE SEGMENTS ...................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
s->segments = hypre_TAlloc(hypre_HopscotchSegment, s->segmentMask + 1, HYPRE_MEMORY_HOST);
for (i = 0; i <= s->segmentMask; ++i)
{
InitSegment(&s->segments[i]);
}
#endif
s->hopInfo = hypre_TAlloc(hypre_uint, num_buckets, HYPRE_MEMORY_HOST);
s->key = hypre_TAlloc(HYPRE_BigInt, num_buckets, HYPRE_MEMORY_HOST);
s->hash = hypre_TAlloc(HYPRE_BigInt, num_buckets, HYPRE_MEMORY_HOST);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for
#endif
for (i = 0; i < num_buckets; ++i)
{
s->hopInfo[i] = 0;
s->hash[i] = HYPRE_HOPSCOTCH_HASH_EMPTY;
}
}
void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel)
{
m->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1;
if (inCapacity < m->segmentMask + 1)
{
inCapacity = m->segmentMask + 1;
}
//ADJUST INPUT ............................
HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity + 4096);
HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1;
m->bucketMask = adjInitCap - 1;
HYPRE_Int i;
//ALLOCATE THE SEGMENTS ...................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
m->segments = hypre_TAlloc(hypre_HopscotchSegment, m->segmentMask + 1, HYPRE_MEMORY_HOST);
for (i = 0; i <= m->segmentMask; i++)
{
InitSegment(&m->segments[i]);
}
#endif
m->table = hypre_TAlloc(hypre_HopscotchBucket, num_buckets, HYPRE_MEMORY_HOST);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for
#endif
for (i = 0; i < num_buckets; i++)
{
InitBucket(&m->table[i]);
}
}
void hypre_UnorderedBigIntMapCreate( hypre_UnorderedBigIntMap *m,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel)
{
m->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1;
if (inCapacity < m->segmentMask + 1)
{
inCapacity = m->segmentMask + 1;
}
//ADJUST INPUT ............................
HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity + 4096);
HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1;
m->bucketMask = adjInitCap - 1;
HYPRE_Int i;
//ALLOCATE THE SEGMENTS ...................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
m->segments = hypre_TAlloc(hypre_HopscotchSegment, m->segmentMask + 1, HYPRE_MEMORY_HOST);
for (i = 0; i <= m->segmentMask; i++)
{
InitSegment(&m->segments[i]);
}
#endif
m->table = hypre_TAlloc(hypre_BigHopscotchBucket, num_buckets, HYPRE_MEMORY_HOST);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for
#endif
for (i = 0; i < num_buckets; i++)
{
InitBigBucket(&m->table[i]);
}
}
void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s )
{
hypre_TFree(s->hopInfo, HYPRE_MEMORY_HOST);
hypre_TFree(s->key, HYPRE_MEMORY_HOST);
hypre_TFree(s->hash, HYPRE_MEMORY_HOST);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
HYPRE_Int i;
for (i = 0; i <= s->segmentMask; i++)
{
DestroySegment(&s->segments[i]);
}
hypre_TFree(s->segments, HYPRE_MEMORY_HOST);
#endif
}
void hypre_UnorderedBigIntSetDestroy( hypre_UnorderedBigIntSet *s )
{
hypre_TFree(s->hopInfo, HYPRE_MEMORY_HOST);
hypre_TFree(s->key, HYPRE_MEMORY_HOST);
hypre_TFree(s->hash, HYPRE_MEMORY_HOST);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
HYPRE_Int i;
for (i = 0; i <= s->segmentMask; i++)
{
DestroySegment(&s->segments[i]);
}
hypre_TFree(s->segments, HYPRE_MEMORY_HOST);
#endif
}
void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m)
{
hypre_TFree(m->table, HYPRE_MEMORY_HOST);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
HYPRE_Int i;
for (i = 0; i <= m->segmentMask; i++)
{
DestroySegment(&m->segments[i]);
}
hypre_TFree(m->segments, HYPRE_MEMORY_HOST);
#endif
}
void hypre_UnorderedBigIntMapDestroy( hypre_UnorderedBigIntMap *m)
{
hypre_TFree(m->table, HYPRE_MEMORY_HOST);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
HYPRE_Int i;
for (i = 0; i <= m->segmentMask; i++)
{
DestroySegment(&m->segments[i]);
}
hypre_TFree(m->segments, HYPRE_MEMORY_HOST);
#endif
}
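/*
 * Compacts the occupied slots of the set into a dense array with a
 * two-pass scheme: each thread counts its non-empty buckets, a prefix
 * sum over the per-thread counts assigns disjoint output ranges, and
 * each thread then copies its keys into its own range.
 */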
HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len )
{
/*HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1];*/
HYPRE_Int *prefix_sum_workspace;
HYPRE_Int *ret_array = NULL;
prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel
#endif
{
HYPRE_Int n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, n);
HYPRE_Int cnt = 0;
HYPRE_Int i;
for (i = i_begin; i < i_end; i++)
{
if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) { cnt++; }
}
hypre_prefix_sum(&cnt, len, prefix_sum_workspace);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp barrier
#pragma omp master
#endif
{
ret_array = hypre_TAlloc(HYPRE_Int, *len, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp barrier
#endif
for (i = i_begin; i < i_end; i++)
{
if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) { ret_array[cnt++] = s->key[i]; }
}
}
hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
return ret_array;
}
HYPRE_BigInt *hypre_UnorderedBigIntSetCopyToArray( hypre_UnorderedBigIntSet *s, HYPRE_Int *len )
{
/*HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1];*/
HYPRE_Int *prefix_sum_workspace;
HYPRE_BigInt *ret_array = NULL;
prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel
#endif
{
HYPRE_Int n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, n);
HYPRE_Int cnt = 0;
HYPRE_Int i;
for (i = i_begin; i < i_end; i++)
{
if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) { cnt++; }
}
hypre_prefix_sum(&cnt, len, prefix_sum_workspace);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp barrier
#pragma omp master
#endif
{
ret_array = hypre_TAlloc(HYPRE_BigInt, *len, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp barrier
#endif
for (i = i_begin; i < i_end; i++)
{
if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) { ret_array[cnt++] = s->key[i]; }
}
}
hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
return ret_array;
}
|
hello_omp.c | /* Hello World OpenMP
*
* Compile on Triton as:
* gcc -fopenmp hello_omp.c -o hello_omp
*
* degtyai1, Wed, 28 May 2014 12:47:47 +0300
* tuomiss1, Mon, 08 Jun 2020
*
*/
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
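/* Each thread in the parallel region runs the printf once; the order of
 * the output lines is nondeterministic. */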
int main(void) {
#if defined(_OPENMP)
#pragma omp parallel
printf("Hello, world from thread %d.\n", omp_get_thread_num());
#else
printf("Hello, world.\n");
#endif
return 0;
}
|
GB_unop__log1p_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log1p_fp32_fp32)
// op(A') function: GB (_unop_tran__log1p_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = log1pf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log1pf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = log1pf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG1P || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__log1p_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = log1pf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = log1pf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__log1p_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fft-cuda.c | /* Copyright 2013, 2015. The Regents of the University of California.
* Copyright 2019. Uecker Lab, University Medical Center Göttingen.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-2019 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* Christian Holme <christian.holme@med.uni-goettingen.de>
*
*
* Internal interface to the CUFFT library used in fft.c.
*/
#include <stdbool.h>
#include <complex.h>
#include <assert.h>
#include <limits.h>
#include "misc/misc.h"
#include "num/multind.h"
#include "fft-cuda.h"
#ifdef USE_CUDA
#include <cufft.h>
#include "num/gpuops.h"
#ifndef CFL_SIZE
#define CFL_SIZE sizeof(complex float)
#endif
struct fft_cuda_plan_s {
cufftHandle cufft;
struct fft_cuda_plan_s* chain;
bool backwards;
long batch;
long idist;
long odist;
};
struct iovec {
long n;
long is;
long os;
};
// detect if flags has blocks of 1's separated by 0's
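// e.g. 0b000110 -> false (one block), 0b010100 -> true (two blocks);
// such flag patterns cannot be handled by a single plan in fft_cuda_plan0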
static bool noncontiguous_flags(int D, unsigned long flags)
{
bool o = false;
bool z = false;
for (int i = 0; i < D; i++) {
bool curr_bit = MD_IS_SET(flags, i);
if (curr_bit) // found a block of ones
o = true;
if (o && !curr_bit) // found the end of a block of ones
z = true;
if (o && z && curr_bit) // found a second block of ones
return true;
}
return false;
}
static struct fft_cuda_plan_s* fft_cuda_plan0(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], const long istrides[D], bool backwards)
{
// TODO: This is not optimal, as it will often create separate fft's where they
// are not needed. And since we compute blocks, we could also recurse
// into both blocks...
if (noncontiguous_flags(D, flags))
return NULL;
PTR_ALLOC(struct fft_cuda_plan_s, plan);
unsigned int N = D;
plan->batch = 1;
plan->odist = 0;
plan->idist = 0;
plan->backwards = backwards;
plan->chain = NULL;
struct iovec dims[N];
struct iovec hmdims[N];
assert(0 != flags);
// the cufft interface is strange, but we do our best...
unsigned int k = 0;
unsigned int l = 0;
for (unsigned int i = 0; i < N; i++) {
if (1 == dimensions[i])
continue;
if (MD_IS_SET(flags, i)) {
dims[k].n = dimensions[i];
dims[k].is = istrides[i] / CFL_SIZE;
dims[k].os = ostrides[i] / CFL_SIZE;
k++;
} else {
hmdims[l].n = dimensions[i];
hmdims[l].is = istrides[i] / CFL_SIZE;
hmdims[l].os = ostrides[i] / CFL_SIZE;
l++;
}
}
assert(k > 0);
int cudims[k];
int cuiemb[k];
int cuoemb[k];
long batchdims[l];
long batchistr[l];
long batchostr[l];
int lis = dims[0].is;
int los = dims[0].os;
if (k > 3)
goto errout;
for (unsigned int i = 0; i < k; i++) {
// assert(dims[i].is == lis);
// assert(dims[i].os == los);
cudims[k - 1 - i] = dims[i].n;
cuiemb[k - 1 - i] = dims[i].n;
cuoemb[k - 1 - i] = dims[i].n;
lis = dims[i].n * dims[i].is;
los = dims[i].n * dims[i].os;
}
for (unsigned int i = 0; i < l; i++) {
batchdims[i] = hmdims[i].n;
batchistr[i] = hmdims[i].is;
batchostr[i] = hmdims[i].os;
}
int istride = dims[0].is;
int ostride = dims[0].os;
int idist = lis;
int odist = los;
int cubs = 1;
// check that batch dimensions can be collapsed to one
unsigned int bi = md_calc_blockdim(l, batchdims, batchistr, hmdims[0].is);
unsigned int bo = md_calc_blockdim(l, batchdims, batchostr, hmdims[0].os);
if (bi != bo)
goto errout;
if (bi > 0) {
idist = hmdims[0].is;
odist = hmdims[0].os;
cubs = md_calc_size(bi, batchdims);
}
if (l != bi) {
		// check that the remaining batch dimensions can be collapsed to one
if (l - bi != md_calc_blockdim(l - bi, batchdims + bi, batchistr + bi, hmdims[bi].is))
goto errout;
if (l - bo != md_calc_blockdim(l - bo, batchdims + bo, batchostr + bo, hmdims[bo].os))
goto errout;
plan->idist = hmdims[bi].is;
plan->odist = hmdims[bo].os;
plan->batch = md_calc_size(l - bi, batchdims + bi);
}
assert(k <= 3);
int err;
#pragma omp critical
err = cufftPlanMany(&plan->cufft, k,
cudims, cuiemb, istride, idist,
cuoemb, ostride, odist, CUFFT_C2C, cubs);
if (CUFFT_SUCCESS != err)
goto errout;
return PTR_PASS(plan);
errout:
PTR_FREE(plan);
return NULL;
}
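/* isolate the most significant set bit: smear all lower bits to one,
 * then (flags + 1) / 2 leaves only the top bit, e.g. 0b0110 -> 0b0100 */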
static unsigned long find_msb(unsigned long flags)
{
for (unsigned int i = 1; i < CHAR_BIT * sizeof(flags); i *= 2)
flags |= flags >> i;
return (flags + 1) / 2;
}
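/* Try a single cuFFT plan covering all flagged dimensions; if that fails
 * (e.g. noncontiguous flags or more than three transform dimensions),
 * peel off the most significant flagged dimension and chain a separate
 * plan for the remaining flags. */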
struct fft_cuda_plan_s* fft_cuda_plan(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], const long istrides[D], bool backwards)
{
struct fft_cuda_plan_s* plan = fft_cuda_plan0(D, dimensions, flags, ostrides, istrides, backwards);
if (NULL != plan)
return plan;
unsigned long msb = find_msb(flags);
if (flags & msb) {
struct fft_cuda_plan_s* plan = fft_cuda_plan0(D, dimensions, msb, ostrides, istrides, backwards);
if (NULL == plan)
return NULL;
plan->chain = fft_cuda_plan(D, dimensions, flags & ~msb, ostrides, ostrides, backwards);
if (NULL == plan->chain) {
fft_cuda_free_plan(plan);
return NULL;
}
return plan;
}
return NULL;
}
void fft_cuda_free_plan(struct fft_cuda_plan_s* cuplan)
{
if (NULL != cuplan->chain)
fft_cuda_free_plan(cuplan->chain);
cufftDestroy(cuplan->cufft);
xfree(cuplan);
}
void fft_cuda_exec(struct fft_cuda_plan_s* cuplan, complex float* dst, const complex float* src)
{
assert(cuda_ondevice(src));
assert(cuda_ondevice(dst));
assert(NULL != cuplan);
int err;
for (int i = 0; i < cuplan->batch; i++) {
if (CUFFT_SUCCESS != (err = cufftExecC2C(cuplan->cufft,
(cufftComplex*)src + i * cuplan->idist,
(cufftComplex*)dst + i * cuplan->odist,
(!cuplan->backwards) ? CUFFT_FORWARD : CUFFT_INVERSE)))
error("CUFFT: %d\n", err);
}
if (NULL != cuplan->chain)
fft_cuda_exec(cuplan->chain, dst, dst);
}
#endif
|
multiplayer.h | //Windiarta - 2006535792
void multi(int refresh)
{
int sizey = 24;
int sizex = 40;
int x, y, yi;
char world[sizey][sizex];
char world2[sizey][sizex];
char player = 'A';
char playerLaser = '^';
char enemy = 'M';
char enemyShielded = 'O';
char enemyLaser = 'v';
char explosion = 'X';
int score = 0, score2 = 0;
int victory = 1, victory2 = 1;
int laserReady1 = 1, laserReady2 = 1;
int enemyReady = 0;
int j;
double t1, t2;
print_wait_3s();
int totalEnemies = 0;
//arena
#pragma omp for
for (x = 0; x < sizex; x ++) {
for (y = 0; y < sizey; y ++) {
if ((y+1) % 2 == 0 && y < 7 && x > 4
&& x < sizex - 5 && x % 2 ==0) {
world[y][x] = enemy;
totalEnemies ++;
}
else if ((y+1) % 2 == 0 && y >= 7 && y < 9 && x > 4
&& x < sizex - 5 && x % 2 ==0){
world[y][x] = enemyShielded;
totalEnemies = totalEnemies + 2;
}
else {
world[y][x] = ' ';
}
}
}
world[sizey - 2][sizex / 2] = player;
int i = 1;
char direction = 'l';
int currentEnemies = totalEnemies;
//copy world setup
#pragma omp for
for (j = 0; j < sizey; j++){
strcpy(world2[j], world[j]);
}
t1 = omp_get_wtime();
#pragma omp parallel
{
#pragma omp master
{
do{
char keyPress;
i++;
if(kbhit()){
keyPress = tolower(getch());
//Player movement
#pragma omp task shared (world, world2, keyPress)
{
laserReady1++;
laserReady2++;
if (keyPress == 'a') {
#pragma omp critical
{
for (x = 0; x < sizex; x = x+1) {
if ( world[sizey-2][x+1] == player) {
world[sizey-2][x] = player;
world[sizey-2][x+1] = ' ';
}
}
}
}
if (keyPress == 'd') {
#pragma omp critical
{
for (x = sizex - 1; x > 0; x = x-1) {
if ( world[sizey-2][x-1] == player) {
world[sizey-2][x] = player;
world[sizey-2][x-1] = ' ';
}
}
}
}
if (keyPress == 'w'){
#pragma omp critical
{
if (laserReady1 > 2) {
for (x = 0; x < sizex; x = x+1) {
if ( world[sizey-2][x] == player) {
world[sizey - 3][x] = playerLaser;
laserReady1 = 0;
}
}
}
}
}
if (keyPress == 'j') {
#pragma omp critical
{
for (x = 0; x < sizex; x = x+1) {
if ( world2[sizey-2][x+1] == player) {
world2[sizey-2][x] = player;
world2[sizey-2][x+1] = ' ';
}
}
}
}
if (keyPress == 'l') {
#pragma omp critical
{
for (x = sizex - 1; x > 0; x = x-1) {
if ( world2[sizey-2][x-1] == player) {
world2[sizey-2][x] = player;
world2[sizey-2][x-1] = ' ';
}
}
}
}
if (keyPress == 'i')
{
#pragma omp critical
{
if (laserReady2 > 2) {
for (x = 0; x < sizex; x = x+1) {
if ( world2[sizey-2][x] == player) {
world2[sizey - 3][x] = playerLaser;
laserReady2 = 0;
}
}
}
}
}
}
} else {
keyPress = ' ';
}
//enemy laser spawner
#pragma omp task shared (world, world2)
{
#pragma omp critical
{
//enemy laser down world 1
for (x = 0; x < sizex; x ++) {
for (y = sizey-1; y >= 0; y --) {
if (i%2 == 0 && world[y][x] == enemyLaser){
if(world[y+1][x] != enemy && world[y+1][x] != enemyShielded){
world[y+1][x] = enemyLaser;
world[y][x] = ' ';
} else if (world[y+1][x] == enemy || world[y+1][x] == enemyShielded){
world[y][x] = ' ';
}
}
}
}
}
#pragma omp critical
{
//enemy laser down world 2
for (x = 0; x < sizex; x ++) {
for (y = sizey-1; y >= 0; y --) {
if (i%2 == 0 && world2[y][x] == enemyLaser){
if(world2[y+1][x] != enemy && world2[y+1][x] != enemyShielded){
world2[y+1][x] = enemyLaser;
world2[y][x] = ' ';
} else if (world2[y+1][x] == enemy || world2[y+1][x] == enemyShielded){
world2[y][x] = ' ';
}
}
}
}
}
}
//game management of world 1
#pragma omp task shared (world, score, victory)
{
int drop = 0;
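                        // enemies advance every `enemySpeed` ticks, so they
                        // move faster as their count drops (Space Invaders style)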
int enemySpeed = 1 + 10 * currentEnemies / totalEnemies;
#pragma omp critical
{
for (x = 0; x < sizex; x ++) {
for (y = 0; y < sizey; y ++) {
if ((i % 5) == 0 && (world[y][x] == enemyShielded
|| world[y][x] == enemy) && (rand() % 15) > 13
&& world[y+1][x] != playerLaser) {
for (yi = y+1; yi < sizey; yi ++) {
if (world[yi][x] == enemy
|| world[yi][x] == enemyShielded) {
enemyReady = 0;
break;
}
enemyReady = 1;
}
if (enemyReady) {
world[y+1][x] = enemyLaser;
}
}
if (world[y][x] == playerLaser && world[y-1][x] == enemy) {
world[y][x] = ' ';
world[y-1][x] = explosion;
currentEnemies --;
score = score + 50;
}
else if (world[y][x] == playerLaser
&& world[y-1][x] == enemyShielded) {
world[y][x] = ' ';
world[y-1][x] = enemy;
currentEnemies --;
score = score + 50;
}
else if (world[y][x] == playerLaser
&& world[y-1][x] == enemyLaser) {
world[y][x] = ' ';
}
else if (world[y][x] == explosion) {
world[y][x] = ' ';
}
else if ((i+1) % 2 == 0 && world[y][x] == enemyLaser
&& world[y+1][x] == player) {
world[y+1][x] = explosion;
world[y][x] = ' ';
victory = 0;
}
else if (world[y][x] == playerLaser
&& world[y-1][x] != enemyLaser) {
world[y-1][x] = playerLaser;
world[y][x] = ' ';
}
}
}
}
#pragma omp critical
{
//enemy movement (left/right)
for (y = 0; y < sizey; y ++) {
if (world[y][0] == enemy) {
direction = 'r';
drop = 1;
break;
}
if (world[y][sizex-1] == enemy){
direction = 'l';
drop = 1;
break;
}
}
}
#pragma omp critical
{
//enemy movement (down)
if (i % enemySpeed == 0) {
if (direction == 'l') {
for (x = 0; x < sizex - 1; x ++) {
for (y = 0; y < sizey; y ++) {
if (drop && (world[y-1][x+1] == enemy
|| world[y-1][x+1] == enemyShielded)){
world[y][x] = world[y-1][x+1];
world[y-1][x+1] = ' ';
}
else if (!drop && (world[y][x+1] == enemy
|| world[y][x+1] == enemyShielded)) {
world[y][x] = world[y][x+1];
world[y][x+1] = ' ';
}
}
}
}
else {
for (x = sizex; x > 0; x --) {
for (y = 0; y < sizey; y ++) {
if (drop && (world[y-1][x-1] == enemy
|| world[y-1][x-1] == enemyShielded)) {
world[y][x] = world[y-1][x-1];
world[y-1][x-1] = ' ';
}
else if (!drop && (world[y][x-1] == enemy
|| world[y][x-1] == enemyShielded)) {
world[y][x] = world[y][x-1];
world[y][x-1] = ' ';
}
}
}
}
for (x = 0; x < sizex; x ++) {
if (world[sizey - 1][x] == enemy) {
victory = 0;
}
}
}
}
}
//game management for world 2
#pragma omp task shared (world2, score2, victory2)
{
int drop = 0;
int enemySpeed = 1 + 10 * currentEnemies / totalEnemies;
#pragma omp critical
{
for (x = 0; x < sizex; x ++) {
for (y = 0; y < sizey; y ++) {
if ((i % 5) == 0 && (world2[y][x] == enemyShielded
|| world2[y][x] == enemy) && (rand() % 15) > 13
&& world2[y+1][x] != playerLaser) {
for (yi = y+1; yi < sizey; yi ++) {
if (world2[yi][x] == enemy
|| world2[yi][x] == enemyShielded) {
enemyReady = 0;
break;
}
enemyReady = 1;
}
if (enemyReady) {
world2[y+1][x] = enemyLaser;
}
}
if (world2[y][x] == playerLaser && world2[y-1][x] == enemy) {
world2[y][x] = ' ';
world2[y-1][x] = explosion;
currentEnemies --;
score2 = score2 + 50;
}
else if (world2[y][x] == playerLaser
&& world2[y-1][x] == enemyShielded) {
world2[y][x] = ' ';
world2[y-1][x] = enemy;
currentEnemies --;
score2 = score2 + 50;
}
else if (world2[y][x] == playerLaser
&& world2[y-1][x] == enemyLaser) {
world2[y][x] = ' ';
}
else if (world2[y][x] == explosion) {
world2[y][x] = ' ';
}
else if ((i+1) % 2 == 0 && world2[y][x] == enemyLaser
&& world2[y+1][x] == player) {
world2[y+1][x] = explosion;
world2[y][x] = ' ';
victory2 = 0;
}
else if (world2[y][x] == playerLaser
&& world2[y-1][x] != enemyLaser) {
world2[y-1][x] = playerLaser;
world2[y][x] = ' ';
}
}
}
}
#pragma omp critical
{
//Enemy movement (left/right)
for (y = 0; y < sizey; y ++) {
if (world2[y][0] == enemy) {
direction = 'r';
drop = 1;
break;
}
if (world2[y][sizex-1] == enemy){
direction = 'l';
drop = 1;
break;
}
}
}
#pragma omp critical
{
//enemy movement(down)
if (i % enemySpeed == 0) {
if (direction == 'l') {
for (x = 0; x < sizex - 1; x ++) {
for (y = 0; y < sizey; y ++) {
if (drop && (world2[y-1][x+1] == enemy
|| world2[y-1][x+1] == enemyShielded)){
world2[y][x] = world2[y-1][x+1];
world2[y-1][x+1] = ' ';
}
else if (!drop && (world2[y][x+1] == enemy
|| world2[y][x+1] == enemyShielded)) {
world2[y][x] = world2[y][x+1];
world2[y][x+1] = ' ';
}
}
}
}
else {
for (x = sizex; x > 0; x --) {
for (y = 0; y < sizey; y ++) {
if (drop && (world2[y-1][x-1] == enemy
|| world2[y-1][x-1] == enemyShielded)) {
world2[y][x] = world2[y-1][x-1];
world2[y-1][x-1] = ' ';
}
else if (!drop && (world2[y][x-1] == enemy
|| world2[y][x-1] == enemyShielded)) {
world2[y][x] = world2[y][x-1];
world2[y][x-1] = ' ';
}
}
}
}
for (x = 0; x < sizex; x ++) {
if (world2[sizey - 1][x] == enemy) {
victory2 = 0;
}
}
}
}
}
                //boundary cleanup and player-elimination check
#pragma omp taskwait
int a = 0, b = 0;
for(x = 0; x < sizex; x++){
world[sizey-1][x] = ' ';
world2[sizey-1][x] = ' ';
world[0][x] = ' ';
world2[0][x] = ' ';
if(world[sizey-2][x] == player){
a++;
}
if(world2[sizey-2][x] == player){
b++;
}
}
if(a == 0) victory2 = 0;
if(b == 0) victory = 0;
//print area for multiplayer
system("cls");
printf("\t\tSCORE: %d\t\t\t\t SCORE : %d", score, score2);
printf("\n");
for (y = 0; y < sizey; y ++) {
printf("|");
for (x = 0; x < sizex; x ++) {
printf("%c",world[y][x]);
}
printf("|");
for (x = 0; x < sizex; x ++){
printf("%c",world2[y][x]);
}
printf("| \n");
}
Sleep(refresh);
} while (victory && victory2);
}
}
//match report
t2 = omp_get_wtime();
system("cls");
print_gameover();
Sleep(1000);
printf("\n\n\n\n\n\n ");
if(victory && !victory2)printf("Player 2 WIN");
else if (victory2 && !victory) printf("Player 1 WIN");
Sleep(1000);
printf("\n\n Score Player 1 = %d", score);
Sleep(1000);
printf("\n\n Score Player 2 = %d", score2);
Sleep(1000);
printf("\n\n Time = %f", t2-t1);
printf("\n\n\nPress any key to continue...");
getch();
system("cls");
}
|
tinyexr.h | #ifndef TINYEXR_H_
#define TINYEXR_H_
/*
Copyright (c) 2014 - 2021, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
#define TINYEXR_X86_OR_X64_CPU 1
#else
#define TINYEXR_X86_OR_X64_CPU 0
#endif
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || TINYEXR_X86_OR_X64_CPU
#define TINYEXR_LITTLE_ENDIAN 1
#else
#define TINYEXR_LITTLE_ENDIAN 0
#endif
// Use miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0) // No threaded loading.
#endif
#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
// tile format image;
// non-zero only for a single-part "normal" tiled file (according to spec.)
int tiled;
int long_name; // long name attribute
// deep image(EXR 2.0);
// for a multi-part file, indicates that at least one part is of type deep* (according to spec.)
int non_image;
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
int height; // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRBox2i {
int min_x;
int min_y;
int max_x;
int max_y;
} EXRBox2i;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
EXRBox2i data_window;
EXRBox2i display_window;
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
// for a single-part file, agree with the version field bit 11
// for a multi-part file, it is consistent with the type of part
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes (excludes required attributes, e.g. `channels`,
// `compression`, etc.)
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Meomory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
// name attribute required for multipart files;
// must be unique and non-empty (according to spec.);
// use EXRSetNameAttr for setting value;
// max 255 characters allowed - excluding terminating zero
char name[256];
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
struct _EXRImage* next_level; // NULL if scanline format or image is the last level.
int level_x; // x level index
int level_y; // y level index
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
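// A minimal usage sketch (illustrative; "input.exr" is a placeholder):
//
//   float *rgba; int w, h; const char *err = NULL;
//   int ret = LoadEXR(&rgba, &w, &h, "input.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) { fprintf(stderr, "EXR error: %s\n", err); FreeEXRErrorMessage(err); }
//   } else {
//     // ... use rgba (w * h * 4 floats) ..., then free(rgba)
//   }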
// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel alpha) or RGB(A) channels. Application must free
// image data as returned by `out_rgba`. Result image format is: float x RGBA x
// width x height. Returns negative value and may set error string in `err`
// when there's an error. When the specified layer name is not found in the EXR
// file, the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layer_name,
const char **err);
//
// Get layer information from an EXR file.
//
// @param[out] layer_names List of layer names. Application must free the
// memory after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string (filled when the function returns an error
// code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
int *num_layers, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// Checks whether the given file is an EXR file (by looking at the header only).
// @return TINYEXR_SUCCESS for an EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assumes the EXR image contains RGB(A)
// channels.
// `components` must be 1 (grayscale), 3 (RGB) or 4 (RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width
// x height`.
// Saves the image in fp16 (HALF) format when `save_as_fp16` is a positive
// non-zero value.
// Saves the image in fp32 (FLOAT) format when `save_as_fp16` is 0.
// Uses ZIP compression by default.
// Returns a negative value and may set an error string in `err` when there's
// an error.
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
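// A minimal usage sketch of the two convenience APIs above (illustrative
// only, not compiled into the library; "input.exr" and "output.exr" are
// hypothetical file names).
#if 0
static int ExampleLoadSave() {
  float *rgba = NULL;  // width * height * RGBA
  int width = 0, height = 0;
  const char *err = NULL;
  int ret = LoadEXR(&rgba, &width, &height, "input.exr", &err);
  if (ret != TINYEXR_SUCCESS) {
    if (err) {
      fprintf(stderr, "LoadEXR error: %s\n", err);
      FreeEXRErrorMessage(err);
    }
    return ret;
  }
  // ... process `rgba` here ...
  ret = SaveEXR(rgba, width, height, 4 /* RGBA */, 1 /* save_as_fp16 */,
                "output.exr", &err);
  if (ret != TINYEXR_SUCCESS && err) {
    fprintf(stderr, "SaveEXR error: %s\n", err);
    FreeEXRErrorMessage(err);
  }
  free(rgba);  // tinyexr allocates `out_rgba` with malloc
  return ret;
}
#endif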
// Returns the number of resolution levels of the image (including the base)
extern int EXRNumLevels(const EXRImage* exr_image);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Set name attribute of EXRHeader struct (it makes a copy)
extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Frees error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must set up `EXRHeader` with `ParseEXRHeaderFromFile` before
// calling this function.
// Application can free EXRImage using `FreeEXRImage`.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
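// A minimal sketch of the full low-level loading flow for a single-part file
// (illustrative only, not compiled into the library; error handling is
// abbreviated and "input.exr" is a hypothetical file name).
#if 0
static int ExampleLowLevelLoad() {
  EXRVersion version;
  EXRHeader header;
  EXRImage image;
  const char *err = NULL;
  int ret = ParseEXRVersionFromFile(&version, "input.exr");
  if (ret != TINYEXR_SUCCESS) return ret;
  InitEXRHeader(&header);
  ret = ParseEXRHeaderFromFile(&header, &version, "input.exr", &err);
  if (ret != TINYEXR_SUCCESS) {
    if (err) FreeEXRErrorMessage(err);
    return ret;
  }
  InitEXRImage(&image);
  ret = LoadEXRImageFromFile(&image, &header, "input.exr", &err);
  if (ret != TINYEXR_SUCCESS) {
    if (err) FreeEXRErrorMessage(err);
    FreeEXRHeader(&header);
    return ret;
  }
  // ... read image.images[c] (or image.tiles when header.tiled) ...
  FreeEXRImage(&image);
  FreeEXRHeader(&header);
  return TINYEXR_SUCCESS;
}
#endif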
// Loads single-part OpenEXR image from memory.
// Application must set up `EXRHeader` with `ParseEXRHeaderFromMemory` before
// calling this function.
// Application can free EXRImage using `FreeEXRImage`.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must set up the `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from memory.
// Application must set up the `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to memory.
// The image is compressed using the `EXRHeader.compression_type` value.
// Returns the number of bytes on success.
// Returns zero and sets an error string in `err` when there's an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a file.
// The image is compressed using the `EXRHeader.compression_type` value.
// File global attributes (e.g. display_window) must be set in the first
// header.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int SaveEXRMultipartImageToFile(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const char *filename, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to memory.
// The image is compressed using the `EXRHeader.compression_type` value.
// File global attributes (e.g. display_window) must be set in the first
// header.
// Returns the number of bytes on success.
// Returns zero and sets an error string in `err` when there's an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free the memory of the DeepImage members (`image`,
// `offset_table`).
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assumes the EXR image
// contains RGB(A) channels.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h> // for UTF-8
#endif
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
// #include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#include <set>
// https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support
#if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900)
#define TINYEXR_HAS_CXX11 (1)
// C++11
#include <cstdint>
#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif
#endif // __cplusplus > 199711L
#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#include "deps/miniz/miniz.h"
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#endif
#include "zfp.h"
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
static void SetErrorMessage(const std::string &msg, const char **err) {
if (err) {
#ifdef _WIN32
(*err) = _strdup(msg.c_str());
#else
(*err) = strdup(msg.c_str());
#endif
}
}
static const int kEXRVersionSize = 8;
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
}
static void swap2(unsigned short *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
(void)val;
#else
unsigned short tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[1];
dst[1] = src[0];
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
static void cpy4(int *dst_val, const int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(float *dst_val, const float *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
static void swap4(unsigned int *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
(void)val;
#else
unsigned int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
static void swap4(int *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
(void)val;
#else
int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
static void swap4(float *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
(void)val;
#else
float tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
(void)val;
#else
tinyexr::tinyexr_uint64 tmp = (*val);
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
union FP32 {
unsigned int u;
float f;
struct {
#if TINYEXR_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
union FP16 {
unsigned short u;
struct {
#if TINYEXR_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
static const unsigned int shifted_exp = 0x7c00
<< 13; // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
// Exponent unbias the single, then bias the halfp
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
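// Illustrative self-check for the pair above (not compiled into the library):
// converting a float that is exactly representable in half precision to half
// and back should be lossless.
#if 0
static void ExampleHalfRoundTrip() {
  FP32 f;
  f.f = 0.5f;  // exactly representable in half precision
  FP16 h = float_to_half_full(f);
  FP32 g = half_to_float(h);
  assert(g.f == 0.5f);
}
#endif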
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
// Read until NULL (\0).
const char *p = ptr;
const char *q = ptr;
while ((size_t(q - ptr) < len) && (*q) != 0) {
q++;
}
if (size_t(q - ptr) >= len) {
(*s) = std::string();
return NULL;
}
(*s) = std::string(p, q);
return q + 1; // skip '\0'
}
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len == 0) {
if ((*type).compare("string") == 0) {
// Accept empty string attribute.
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
data->resize(1);
(*data)[0] = '\0';
return true;
} else {
return false;
}
}
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
const char *name, const char *type,
const unsigned char *data, int len) {
out->insert(out->end(), name, name + strlen(name) + 1);
out->insert(out->end(), type, type + strlen(type) + 1);
int outLen = len;
tinyexr::swap4(&outLen);
out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
out->insert(out->end(), data, data + len);
}
typedef struct {
std::string name; // less than 255 bytes long
int pixel_type;
int requested_pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} ChannelInfo;
typedef struct {
int min_x;
int min_y;
int max_x;
int max_y;
} Box2iInfo;
struct HeaderInfo {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
Box2iInfo data_window;
int line_order;
Box2iInfo display_window;
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tiled; // Non-zero if the part is tiled.
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len;
int compression_type;
// required for multi-part or non-image files
std::string name;
// required for multi-part or non-image files
std::string type;
void clear() {
channels.clear();
attributes.clear();
data_window.min_x = 0;
data_window.min_y = 0;
data_window.max_x = 0;
data_window.max_y = 0;
line_order = 0;
display_window.min_x = 0;
display_window.min_y = 0;
display_window.max_x = 0;
display_window.max_y = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tiled = 0;
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
name.clear();
type.clear();
}
};
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
(p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
p = ReadString(&info.name, p, size_t(data_len));
if ((p == NULL) && (info.name.empty())) {
// Buffer overrun. Issue #51.
return false;
}
const unsigned char *data_end =
reinterpret_cast<const unsigned char *>(p) + 16;
if (data_end >= (data.data() + data.size())) {
return false;
}
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
tinyexr::swap4(&info.pixel_type);
tinyexr::swap4(&info.x_sampling);
tinyexr::swap4(&info.y_sampling);
channels.push_back(info);
}
return true;
}
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].requested_pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
tinyexr::swap4(&pixel_type);
tinyexr::swap4(&x_sampling);
tinyexr::swap4(&y_sampling);
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply EXR-specific preprocessing. Grabbed from OpenEXR's
// ImfZipCompressor.cpp
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
mz_ulong outSize = mz_compressBound(src_size);
int ret = mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
assert(ret == MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
assert(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
return true;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (MZ_OK != ret) {
return false;
}
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (Z_OK != ret) {
return false;
}
#endif
//
// Apply EXR-specific postprocessing. Grabbed from OpenEXR's
// ImfZipCompressor.cpp
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
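// Illustrative note (not compiled into the library): the transforms above are
// exact inverses. CompressZip first de-interleaves bytes into two halves and
// then delta-encodes; DecompressZip undoes the delta first and then
// re-interleaves. A tiny round-trip check under that assumption:
#if 0
static void ExampleZipRoundTrip() {
  const unsigned char src[6] = {10, 20, 30, 40, 50, 60};
  unsigned char compressed[512];  // comfortably >= compressBound(6)
  tinyexr_uint64 compressedSize = 0;
  CompressZip(compressed, compressedSize, src, 6);
  unsigned char out[6];
  unsigned long outSize = 6;
  bool ok = DecompressZip(out, &outSize, compressed,
                          static_cast<unsigned long>(compressedSize));
  assert(ok && memcmp(out, src, 6) == 0);
}
#endif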
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
const char *inEnd = in + inLength;
const char *runStart = in;
const char *runEnd = in + 1;
signed char *outWrite = out;
while (runStart < inEnd) {
while (runEnd < inEnd && *runStart == *runEnd &&
runEnd - runStart - 1 < MAX_RUN_LENGTH) {
++runEnd;
}
if (runEnd - runStart >= MIN_RUN_LENGTH) {
//
// Compressible run
//
*outWrite++ = static_cast<char>(runEnd - runStart) - 1;
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
runStart = runEnd;
} else {
//
// Uncompressable run
//
while (runEnd < inEnd &&
((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
(runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
runEnd - runStart < MAX_RUN_LENGTH) {
++runEnd;
}
*outWrite++ = static_cast<char>(runStart - runEnd);
while (runStart < runEnd) {
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
}
}
++runEnd;
}
return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
// Fixes #116: Add bounds check to in buffer.
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
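// Illustrative round-trip check for the RLE pair above (not compiled into the
// library): rleUncompress exactly inverts rleCompress.
#if 0
static void ExampleRleRoundTrip() {
  const char in[8] = {7, 7, 7, 7, 1, 2, 3, 4};  // one run, then literals
  signed char compressed[12];  // worst case is (inLength * 3) / 2
  int n = rleCompress(8, in, compressed);
  char out[8];
  int m = rleUncompress(n, 8, compressed, out);
  assert(m == 8 && memcmp(out, in, 8) == 0);
}
#endif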
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply EXR-specific preprocessing. Grabbed from OpenEXR's
// ImfRleCompressor.cpp
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// outSize will be (src_size * 3) / 2 at max.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
assert(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
return true;
}
// Workaround for issue #112.
// TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
if (src_size <= 2) {
return false;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
if (ret != static_cast<int>(uncompressed_size)) {
return false;
}
//
// Apply EXR-specific postprocessing. Grabbed from OpenEXR's
// ImfRleCompressor.cpp
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
struct PIZChannelData {
unsigned short *start;
unsigned short *end;
int nx;
int ny;
int ys;
int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = static_cast<short>(a);
short bs = static_cast<short>(b);
short ms = (as + bs) >> 1;
short ds = as - bs;
l = static_cast<unsigned short>(ms);
h = static_cast<unsigned short>(ds);
}
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
short ls = static_cast<short>(l);
short hs = static_cast<short>(h);
int hi = hs;
int ai = ls + (hi & 1) + (hi >> 1);
short as = static_cast<short>(ai);
short bs = static_cast<short>(ai - hi);
a = static_cast<unsigned short>(as);
b = static_cast<unsigned short>(bs);
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = static_cast<unsigned short>(m);
h = static_cast<unsigned short>(d);
}
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
int m = l;
int d = h;
int bb = (m - (d >> 1)) & MOD_MASK;
int aa = (d + bb - A_OFFSET) & MOD_MASK;
b = static_cast<unsigned short>(bb);
a = static_cast<unsigned short>(aa);
}
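// Illustrative self-check (not compiled into the library): wdec16 inverts
// wenc16 for arbitrary 16-bit values, while wdec14/wenc14 are exact inverses
// only for values below (1 << 14).
#if 0
static void ExampleWaveletBasisRoundTrip() {
  unsigned short a = 513, b = 64000, l, h, a2, b2;
  wenc16(a, b, l, h);
  wdec16(l, h, a2, b2);
  assert(a2 == a && b2 == b);
}
#endif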
//
// 2D Wavelet encoding:
//
static void wav2Encode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
// Hierarchical loop on smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
//
// 2D Wavelet decoding:
//
static void wav2Decode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1;
int p2;
//
// Search max level
//
while (p <= n) p <<= 1;
p >>= 1;
p2 = p;
p >>= 1;
//
// Hierarchical loop on smaller dimension n
//
while (p >= 1) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet decoding
//
if (w14) {
wdec14(*px, *p10, i00, i10);
wdec14(*p01, *p11, i01, i11);
wdec14(i00, i01, *px, *p01);
wdec14(i10, i11, *p10, *p11);
} else {
wdec16(*px, *p10, i00, i10);
wdec16(*p01, *p11, i01, i11);
wdec16(i00, i01, *px, *p01);
wdec16(i10, i11, *p10, *p11);
}
}
//
// Decode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wdec14(*px, *p10, i00, *p10);
else
wdec16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Decode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wdec14(*px, *p01, i00, *p01);
else
wdec16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p2 = p;
p >>= 1;
}
}
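// Illustrative round-trip sketch for the 2D wavelet pair above (not compiled
// into the library): encoding a 4x4 row-major block in place and decoding it
// again restores the original values.
#if 0
static void ExampleWav2RoundTrip() {
  unsigned short block[16];
  for (int i = 0; i < 16; i++) block[i] = static_cast<unsigned short>(i * 100);
  unsigned short orig[16];
  memcpy(orig, block, sizeof(block));
  // nx = ny = 4; ox = 1 (stride between columns), oy = 4 (stride between rows)
  wav2Encode(block, 4, 1, 4, 4, 1500);  // mx = 1500 selects the 14-bit basis
  wav2Decode(block, 4, 1, 4, 4, 1500);
  assert(memcmp(block, orig, sizeof(block)) == 0);
}
#endif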
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
struct HufDec { // short code long code
//-------------------------------
unsigned int len : 8; // code length 0
unsigned int lit : 24; // lit p size
unsigned int *p; // 0 lits
};
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
while (lc < nBits) {
c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
lc += 8;
}
lc -= nBits;
return (c >> lc) & ((1 << nBits) - 1);
}
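// Illustrative sketch of the bit I/O pair above (not compiled into the
// library): values written MSB-first with outputBits read back in the same
// order with getBits.
#if 0
static void ExampleBitIO() {
  char buf[8];
  char *out = buf;
  long long c = 0;
  int lc = 0;
  outputBits(5, 0x15, c, lc, out);  // write 10101
  outputBits(3, 0x3, c, lc, out);   // write 011; 8 bits total, one byte flushed
  const char *in = buf;
  long long c2 = 0;
  int lc2 = 0;
  assert(getBits(5, c2, lc2, in) == 0x15);
  assert(getBits(3, c2, lc2, in) == 0x3);
}
#endif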
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
long long n[59];
//
// For each i from 0 through 58, count the
// number of different codes of length i, and
// store the count in n[i].
//
for (int i = 0; i <= 58; ++i) n[i] = 0;
for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
//
// For each i from 58 through 1, compute the
// numerically lowest code with length i, and
// store that code in n[i].
//
long long c = 0;
for (int i = 58; i > 0; --i) {
long long nc = ((c + n[i]) >> 1);
n[i] = c;
c = nc;
}
//
// hcode[i] contains the length, l, of the
// code for symbol i. Assign the next available
// code of length l to the symbol and store both
// l and the code in hcode[i].
//
for (int i = 0; i < HUF_ENCSIZE; ++i) {
int l = static_cast<int>(hcode[i]);
if (l > 0) hcode[i] = l | (n[l]++ << 6);
}
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
std::vector<int> hlink(HUF_ENCSIZE);
std::vector<long long *> fHeap(HUF_ENCSIZE);
*im = 0;
while (!frq[*im]) (*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
std::vector<long long> scode(HUF_ENCSIZE);
memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode.data());
memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
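// For example, a run of 5 zeroes packs into the single 6-bit code 62, a run
// of 6 zeroes (SHORTEST_LONG_RUN) packs as 63 followed by the 8-bit value 0,
// and the longest run of 261 zeroes (LONGEST_LONG_RUN) packs as 63 followed
// by 255.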
static void hufPackEncTable(
const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
int l = hufLength(hcode[im]);
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0) break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
*pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
const char **pcode, // io: ptr to packed table (updated)
int ni, // i : input size (in bytes)
int im, // i : min hcode index
int iM, // i : max hcode index
long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
const char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
if (p - *pcode >= ni) {
return false;
}
long long l = hcode[im] = getBits(6, c, lc, p); // code length
if (l == (long long)LONG_ZEROCODE_RUN) {
if (p - *pcode > ni) {
return false;
}
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
} else if (l >= (long long)SHORT_ZEROCODE_RUN) {
int zerun = l - SHORT_ZEROCODE_RUN + 2;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
}
}
*pcode = const_cast<char *>(p);
hufCanonicalCodeTable(hcode);
return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
for (int i = 0; i < HUF_DECSIZE; i++) {
hdecod[i].len = 0;
hdecod[i].lit = 0;
hdecod[i].p = NULL;
}
// memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// infrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
int im, // i : min index in hcode
int iM, // i : max index in hcode
HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
//
// Init hashtable & loop on all codes.
// Assumes that hufClearDecTable(hdecod) has already been called.
//
for (; im <= iM; im++) {
long long c = hufCode(hcode[im]);
int l = hufLength(hcode[im]);
if (c >> l) {
//
// Error: c is supposed to be an l-bit code,
// but c contains a value that is greater
// than the largest l-bit number.
//
// invalidTableEntry();
return false;
}
if (l > HUF_DECBITS) {
//
// Long code: add a secondary entry
//
HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
if (pl->len) {
//
// Error: a short code has already
// been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->lit++;
if (pl->p) {
unsigned int *p = pl->p;
pl->p = new unsigned int[pl->lit];
for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
delete[] p;
} else {
pl->p = new unsigned int[1];
}
pl->p[pl->lit - 1] = im;
} else if (l) {
//
// Short code: init all primary entries
//
HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
if (pl->len || pl->p) {
//
// Error: a short code or a long code has
// already been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->len = l;
pl->lit = im;
}
}
}
return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
for (int i = 0; i < HUF_DECSIZE; i++) {
if (hdecod[i].p) {
delete[] hdecod[i].p;
hdecod[i].p = 0;
}
}
}
//
// ENCODING
//
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
long long &c, int &lc, char *&out) {
//
// Output a run of runCount instances of the symbol sCode.
// Output the symbols explicitly, or if that is shorter, output
// the sCode symbol once followed by a runCode symbol and runCount
// expressed as an 8-bit number.
//
if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
outputCode(sCode, c, lc, out);
outputCode(runCode, c, lc, out);
outputBits(8, runCount, c, lc, out);
} else {
while (runCount-- >= 0) outputCode(sCode, c, lc, out);
}
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode // return: output size (in bits)
(const long long *hcode, // i : encoding table
const unsigned short *in, // i : uncompressed input buffer
const int ni, // i : input buffer size (in bytes)
int rlc, // i : rl code
char *out) // o: compressed output buffer
{
char *outStart = out;
long long c = 0; // bits not yet written to out
int lc = 0; // number of valid bits in c (LSB)
int s = in[0];
int cs = 0;
//
// Loop on input values
//
for (int i = 1; i < ni; i++) {
//
// Count same values or send code
//
if (s == in[i] && cs < 255) {
cs++;
} else {
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
cs = 0;
}
s = in[i];
}
//
// Send remaining code
//
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
if (lc) *out = (c << (8 - lc)) & 0xff;
return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
const char *in_end, unsigned short *&out,
const unsigned short *ob, const unsigned short *oe) {
(void)ob;
if (po == rlc) {
if (lc < 8) {
/* TinyEXR issue 78 */
if ((in + 1) >= in_end) {
return false;
}
getChar(c, lc, in);
}
lc -= 8;
unsigned char cs = (c >> lc);
if (out + cs > oe) return false;
// Bounds check for safety
// Issue 100.
if ((out - 1) < ob) return false;
unsigned short s = out[-1];
while (cs-- > 0) *out++ = s;
} else if (out < oe) {
*out++ = po;
} else {
return false;
}
return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode, // i : encoding table
const HufDec *hdecod, // i : decoding table
const char *in, // i : compressed input buffer
int ni, // i : input size (in bits)
int rlc, // i : run-length code
int no, // i : expected output size (in bytes)
unsigned short *out) // o: uncompressed output buffer
{
long long c = 0;
int lc = 0;
unsigned short *outb = out; // begin
unsigned short *oe = out + no; // end
const char *ie = in + (ni + 7) / 8; // input byte size
//
// Loop on input bytes
//
while (in < ie) {
getChar(c, lc, in);
//
// Access decoding table
//
while (lc >= HUF_DECBITS) {
const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
if (pl.len) {
//
// Get short code
//
lc -= pl.len;
// std::cout << "lit = " << pl.lit << std::endl;
// std::cout << "rlc = " << rlc << std::endl;
// std::cout << "c = " << c << std::endl;
// std::cout << "lc = " << lc << std::endl;
// std::cout << "in = " << in << std::endl;
// std::cout << "out = " << out << std::endl;
// std::cout << "oe = " << oe << std::endl;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
if (!pl.p) {
return false;
}
// invalidCode(); // wrong code
//
// Search long code
//
int j;
for (j = 0; j < pl.lit; j++) {
int l = hufLength(hcode[pl.p[j]]);
while (lc < l && in < ie) // get more bits
getChar(c, lc, in);
if (lc >= l) {
if (hufCode(hcode[pl.p[j]]) ==
((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
//
// Found : get long code
//
lc -= l;
if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
break;
}
}
}
if (j == pl.lit) {
return false;
// invalidCode(); // Not found
}
}
}
}
//
// Get remaining (short) codes
//
int i = (8 - ni) & 7;
c >>= i;
lc -= i;
while (lc > 0) {
const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
if (pl.len) {
lc -= pl.len;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
return false;
// invalidCode(); // wrong (long) code
}
}
if (out - outb != no) {
return false;
}
// notEnoughData ();
return true;
}
static void countFrequencies(std::vector<long long> &freq,
const unsigned short data[/*n*/], int n) {
for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
for (int i = 0; i < n; ++i) ++freq[data[i]];
}
static void writeUInt(char buf[4], unsigned int i) {
unsigned char *b = (unsigned char *)buf;
b[0] = i;
b[1] = i >> 8;
b[2] = i >> 16;
b[3] = i >> 24;
}
static unsigned int readUInt(const char buf[4]) {
const unsigned char *b = (const unsigned char *)buf;
return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}
//
// EXTERNAL INTERFACE
//
static int hufCompress(const unsigned short raw[], int nRaw,
char compressed[]) {
if (nRaw == 0) return 0;
std::vector<long long> freq(HUF_ENCSIZE);
countFrequencies(freq, raw, nRaw);
int im = 0;
int iM = 0;
hufBuildEncTable(freq.data(), &im, &iM);
char *tableStart = compressed + 20;
char *tableEnd = tableStart;
hufPackEncTable(freq.data(), im, iM, &tableEnd);
int tableLength = tableEnd - tableStart;
char *dataStart = tableEnd;
int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
int data_length = (nBits + 7) / 8;
writeUInt(compressed, im);
writeUInt(compressed + 4, iM);
writeUInt(compressed + 8, tableLength);
writeUInt(compressed + 12, nBits);
writeUInt(compressed + 16, 0); // room for future extensions
return dataStart + data_length - compressed;
}
static bool hufUncompress(const char compressed[], int nCompressed,
std::vector<unsigned short> *raw) {
if (nCompressed == 0) {
if (raw->size() != 0) return false;
return false;
}
int im = readUInt(compressed);
int iM = readUInt(compressed + 4);
// int tableLength = readUInt (compressed + 8);
int nBits = readUInt(compressed + 12);
if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
const char *ptr = compressed + 20;
//
// Fast decoder needs at least 2x64-bits of compressed data, and
// needs to be run-able on this platform. Otherwise, fall back
// to the original decoder
//
// if (FastHufDecoder::enabled() && nBits > 128)
//{
// FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
// fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
//}
// else
{
std::vector<long long> freq(HUF_ENCSIZE);
std::vector<HufDec> hdec(HUF_DECSIZE);
hufClearDecTable(&hdec.at(0));
    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }
{
if (nBits > 8 * (nCompressed - (ptr - compressed))) {
return false;
}
hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
      if (!hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM,
                     static_cast<int>(raw->size()), raw->data())) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }
}
// catch (...)
//{
// hufFreeDecTable (hdec);
// throw;
//}
hufFreeDecTable(&hdec.at(0));
}
return true;
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
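// The bitmap stores one bit per possible 16-bit value:
// 65536 values / 8 bits per byte = 8192 bytes.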
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
unsigned char bitmap[BITMAP_SIZE],
unsigned short &minNonZero,
unsigned short &maxNonZero) {
for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
bitmap[0] &= ~1; // zero is not explicitly stored in
// the bitmap; we assume that the
// data always contain zeroes
minNonZero = BITMAP_SIZE - 1;
maxNonZero = 0;
for (int i = 0; i < BITMAP_SIZE; ++i) {
if (bitmap[i]) {
if (minNonZero > i) minNonZero = i;
if (maxNonZero < i) maxNonZero = i;
}
}
}
static unsigned short forwardLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
lut[i] = k++;
else
lut[i] = 0;
}
return k - 1; // maximum value stored in lut[],
} // i.e. number of ones in bitmap minus 1
static unsigned short reverseLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
}
int n = k - 1;
while (k < USHORT_RANGE) lut[k++] = 0;
return n; // maximum k where lut[k] is non-zero,
} // i.e. number of ones in bitmap minus 1
static void applyLut(const unsigned short lut[USHORT_RANGE],
unsigned short data[/*nData*/], int nData) {
for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}
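// Illustration of the round trip (not part of the library): the encoder runs
//   bitmapFromData(data, n, bitmap, minNonZero, maxNonZero);
//   maxValue = forwardLutFromBitmap(bitmap, lut); // dense indices 0..maxValue
//   applyLut(lut, data, n);                       // remap into the dense range
// and the decoder rebuilds the inverse table with reverseLutFromBitmap() so
// that the same applyLut() call restores the original 16-bit values.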
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
const unsigned char *inPtr, size_t inSize,
const std::vector<ChannelInfo> &channelInfo,
int data_width, int num_lines) {
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !TINYEXR_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
// Assume `inSize` is multiple of 2 or 4.
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
std::vector<PIZChannelData> channelData(channelInfo.size());
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t c = 0; c < channelData.size(); c++) {
PIZChannelData &cd = channelData[c];
cd.start = tmpBufferEnd;
cd.end = cd.start;
cd.nx = data_width;
cd.ny = num_lines;
// cd.ys = c.channel().ySampling;
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (channelInfo[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
cd.size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += cd.nx * cd.ny * cd.size;
}
const unsigned char *ptr = inPtr;
for (int y = 0; y < num_lines; ++y) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(cd.end, ptr, n * sizeof(unsigned short));
ptr += n * sizeof(unsigned short);
cd.end += n;
}
}
bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
bitmap.data(), minNonZero, maxNonZero);
std::vector<unsigned short> lut(USHORT_RANGE);
unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
//
// Store range compression info in _outBuffer
//
char *buf = reinterpret_cast<char *>(outPtr);
memcpy(buf, &minNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
memcpy(buf, &maxNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
if (minNonZero <= maxNonZero) {
memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
maxNonZero - minNonZero + 1);
buf += maxNonZero - minNonZero + 1;
}
//
// Apply wavelet encoding
//
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Apply Huffman encoding; append the result to _outBuffer
//
  // A 4-byte length header, followed by the Huffman data. Write a zero
  // placeholder first, then patch in the actual `length` afterwards.
char *lengthPtr = buf;
int zero = 0;
memcpy(buf, &zero, sizeof(int));
buf += sizeof(int);
int length =
hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
memcpy(lengthPtr, &length, sizeof(int));
(*outSize) = static_cast<unsigned int>(
(reinterpret_cast<unsigned char *>(buf) - outPtr) +
static_cast<unsigned int>(length));
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if ((*outSize) >= inSize) {
(*outSize) = static_cast<unsigned int>(inSize);
memcpy(outPtr, inPtr, inSize);
}
return true;
}
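// A PIZ-compressed block, as written by CompressPiz() above, consists of:
//   2 bytes      minNonZero
//   2 bytes      maxNonZero
//   N bytes      bitmap[minNonZero..maxNonZero] (only if minNonZero <= maxNonZero)
//   4 bytes      length of the Huffman data
//   `length` bytes of Huffman-coded, wavelet-encoded, LUT-remapped samples.
// DecompressPiz() below undoes these stages in reverse order.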
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
size_t tmpBufSize, size_t inLen, int num_channels,
const EXRChannelInfo *channels, int data_width,
int num_lines) {
if (inLen == tmpBufSize) {
    // Data is not compressed (Issue 40).
memcpy(outPtr, inPtr, inLen);
return true;
}
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !TINYEXR_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
memset(bitmap.data(), 0, BITMAP_SIZE);
const unsigned char *ptr = inPtr;
// minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
// maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
ptr += 4;
if (maxNonZero >= BITMAP_SIZE) {
return false;
}
if (minNonZero <= maxNonZero) {
memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
maxNonZero - minNonZero + 1);
ptr += maxNonZero - minNonZero + 1;
}
std::vector<unsigned short> lut(USHORT_RANGE);
memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
//
// Huffman decoding
//
int length;
// length = *(reinterpret_cast<const int *>(ptr));
tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
ptr += sizeof(int);
  if (length < 0 || size_t((ptr - inPtr) + length) > inLen) {
    return false;
  }
  // tmpBufSize is in bytes; the PIZ stream stores 16-bit samples, so the
  // working buffer holds tmpBufSize / 2 values.
  std::vector<unsigned short> tmpBuffer(tmpBufSize / sizeof(unsigned short));
  if (!hufUncompress(reinterpret_cast<const char *>(ptr), length,
                     &tmpBuffer)) {
    return false;
  }
//
// Wavelet decoding
//
std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
const EXRChannelInfo &chan = channels[i];
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
channelData[i].start = tmpBufferEnd;
channelData[i].end = channelData[i].start;
channelData[i].nx = data_width;
channelData[i].ny = num_lines;
// channelData[i].ys = 1;
channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
}
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Expand the pixel data to their original range
//
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
for (int y = 0; y < num_lines; y++) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
outPtr += n * sizeof(unsigned short);
cd.end += n;
}
}
return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
struct ZFPCompressionParam {
double rate;
unsigned int precision;
unsigned int __pad0;
double tolerance;
int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
unsigned int __pad1;
ZFPCompressionParam() {
type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
rate = 2.0;
precision = 0;
tolerance = 0.0;
}
};
static bool FindZFPCompressionParam(ZFPCompressionParam *param,
const EXRAttribute *attributes,
int num_attributes, std::string *err) {
bool foundType = false;
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) {
if (attributes[i].size == 1) {
param->type = static_cast<int>(attributes[i].value[0]);
foundType = true;
break;
} else {
if (err) {
(*err) +=
"zfpCompressionType attribute must be uchar(1 byte) type.\n";
}
return false;
}
}
}
if (!foundType) {
if (err) {
(*err) += "`zfpCompressionType` attribute not found.\n";
}
return false;
}
if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
(attributes[i].size == 8)) {
param->rate = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
if (err) {
(*err) += "`zfpCompressionRate` attribute not found.\n";
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
(attributes[i].size == 4)) {
        param->precision =
            *(reinterpret_cast<unsigned int *>(attributes[i].value));
return true;
}
}
if (err) {
(*err) += "`zfpCompressionPrecision` attribute not found.\n";
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
(attributes[i].size == 8)) {
param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
if (err) {
(*err) += "`zfpCompressionTolerance` attribute not found.\n";
}
} else {
if (err) {
(*err) += "Unknown value specified for `zfpCompressionType`.\n";
}
}
return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
size_t num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
  // `src_size` is in bytes, so compare against the byte size of the
  // uncompressed FLOAT image.
  size_t uncompressed_size =
      size_t(dst_width) * size_t(dst_num_lines) * num_channels * sizeof(float);
  if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, static_cast<unsigned int>(dst_width),
static_cast<unsigned int>(dst_num_lines) *
static_cast<unsigned int>(num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = size_t(dst_width) * size_t(dst_num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// decompress 4x4 pixel block.
for (size_t y = 0; y < size_t(dst_num_lines); y += 4) {
for (size_t x = 0; x < size_t(dst_width); x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
static bool CompressZfp(std::vector<unsigned char> *outBuf,
unsigned int *outSize, const float *inPtr, int width,
int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, static_cast<unsigned int>(width),
static_cast<unsigned int>(num_lines * num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = size_t(width) * size_t(num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// compress 4x4 pixel block.
for (size_t y = 0; y < size_t(num_lines); y += 4) {
for (size_t x = 0; x < size_t(width); x += 4) {
float fblock[16];
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
  zfp_stream_flush(zfp);
  (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));
  zfp_stream_close(zfp);
  stream_close(stream);
  return true;
}
#endif
//
// -----------------------------------------------------------------
//
// Heuristic sanity limit for image and tile dimensions (per axis), used to
// reject corrupt or malicious headers.
#define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192)
// TODO(syoyo): Refactor function arguments.
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
// Invalid input #90
return false;
}
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
if (!ret) {
return false;
}
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
// hf.u = line_ptr[u];
// use `cpy` to avoid unaligned memory access when compiler's
// optimization is on.
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          // outBuf is laid out with `width` pixels per line (see the HALF and
          // UINT branches above); indexing with x_stride would overrun for
          // clipped edge tiles.
          const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
              v * pixel_data_size * static_cast<size_t>(width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
assert(0 && "PIZ is enabled in this build");
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
if (dstLen == 0) {
return false;
}
if (!tinyexr::DecompressRle(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
std::string e;
if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes,
int(num_attributes), &e)) {
// This code path should not be reachable.
assert(0);
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    assert(dstLen > 0);
    (void)dstLen;
    if (!tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)),
                                width, num_lines, num_channels, data_ptr,
                                static_cast<unsigned long>(data_len),
                                zfp_compression_param)) {
      return false;
    }
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
            if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
                (data_ptr + data_len)) {
              // Insufficient data size
              return false;
            }
            for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
            // address may not be aligned. use byte-wise copy for safety (#76).
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
return false;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
unsigned int *outLine =
reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
            if (reinterpret_cast<const unsigned char *>(line_ptr + u + 1) >
                (data_ptr + data_len)) {
              // Corrupted data?
return false;
}
unsigned int val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
}
return true;
}
static bool DecodeTiledPixelData(
unsigned char **out_images, int *width, int *height,
const int *requested_pixel_types, const unsigned char *data_ptr,
size_t data_len, int compression_type, int line_order, int data_width,
int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
int tile_size_y, size_t pixel_data_size, size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
// Here, data_width and data_height are the dimensions of the current (sub)level.
if (tile_size_x * tile_offset_x > data_width ||
tile_size_y * tile_offset_y > data_height) {
return false;
}
// Compute actual image size in a tile.
if ((tile_offset_x + 1) * tile_size_x >= data_width) {
(*width) = data_width - (tile_offset_x * tile_size_x);
} else {
(*width) = tile_size_x;
}
if ((tile_offset_y + 1) * tile_size_y >= data_height) {
(*height) = data_height - (tile_offset_y * tile_size_y);
} else {
(*height) = tile_size_y;
}
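  // Example: with data_width = 100 and tile_size_x = 32, tiles 0..2 are
  // 32 pixels wide and the rightmost tile (tile_offset_x == 3) gets
  // 100 - 3 * 32 = 4 pixels.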
  // The decoded region size equals the (possibly clipped) tile size.
return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
compression_type, line_order, (*width), tile_size_y,
/* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
(*height), pixel_data_size, num_attributes, attributes,
num_channels, channels, channel_offset_list);
}
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
// ???
return false;
}
}
return true;
}
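// Example: for three HALF channels ("B", "G", "R") this yields
// channel_offset_list = {0, 2, 4} and pixel_data_size = 6 bytes per pixel.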
static unsigned char **AllocateImage(int num_channels,
const EXRChannelInfo *channels,
const int *requested_pixel_types,
int data_width, int data_height) {
unsigned char **images =
reinterpret_cast<unsigned char **>(static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
size_t data_len =
static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
// pixel_data_size += sizeof(unsigned short);
// channel_offset += sizeof(unsigned short);
// Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
images[c] =
reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
malloc(sizeof(unsigned short) * data_len)));
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
// pixel_data_size += sizeof(float);
// channel_offset += sizeof(float);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
// pixel_data_size += sizeof(unsigned int);
// channel_offset += sizeof(unsigned int);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
} else {
assert(0);
}
}
return images;
}
#ifdef _WIN32
static inline std::wstring UTF8ToWchar(const std::string &str) {
int wstr_size =
MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0);
std::wstring wstr(wstr_size, 0);
MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &wstr[0],
(int)wstr.size());
return wstr;
}
#endif
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
bool has_name = false;
bool has_type = false;
info->name.clear();
info->type.clear();
info->data_window.min_x = 0;
info->data_window.min_y = 0;
info->data_window.max_x = 0;
info->data_window.max_y = 0;
info->line_order = 0; // @fixme
info->display_window.min_x = 0;
info->display_window.min_y = 0;
info->display_window.max_x = 0;
info->display_window.max_y = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tiled = 0;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
size_t orig_size = size;
for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
if (0 == size) {
if (err) {
(*err) += "Insufficient data size for attributes.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
if (err) {
(*err) += "Failed to read attribute.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
    // In a multi-part file the tiled bit (bit 9) of the version field is 0,
    // so a `tiles` attribute must also be accepted when the multipart or
    // non_image flag is set.
if ((version->tiled || version->multipart || version->non_image) && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
      if (data.size() != 9) {
        if (err) {
          (*err) = "Invalid `tiles` attribute size.";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) ||
y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) {
if (err) {
(*err) = "Tile sizes were invalid.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
info->tiled = 1;
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!ReadChannelInfo(info->channels, data)) {
if (err) {
(*err) += "Failed to parse channel info.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (info->channels.size() < 1) {
if (err) {
(*err) += "# of channels is zero.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->data_window.min_x, &data.at(0), sizeof(int));
memcpy(&info->data_window.min_y, &data.at(4), sizeof(int));
memcpy(&info->data_window.max_x, &data.at(8), sizeof(int));
memcpy(&info->data_window.max_y, &data.at(12), sizeof(int));
tinyexr::swap4(&info->data_window.min_x);
tinyexr::swap4(&info->data_window.min_y);
tinyexr::swap4(&info->data_window.max_x);
tinyexr::swap4(&info->data_window.max_y);
has_data_window = true;
}
} else if (attr_name.compare("displayWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->display_window.min_x, &data.at(0), sizeof(int));
memcpy(&info->display_window.min_y, &data.at(4), sizeof(int));
memcpy(&info->display_window.max_x, &data.at(8), sizeof(int));
memcpy(&info->display_window.max_y, &data.at(12), sizeof(int));
tinyexr::swap4(&info->display_window.min_x);
tinyexr::swap4(&info->display_window.min_y);
tinyexr::swap4(&info->display_window.max_x);
tinyexr::swap4(&info->display_window.max_y);
has_display_window = true;
}
} else if (attr_name.compare("lineOrder") == 0) {
if (data.size() >= 1) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
}
} else if (attr_name.compare("pixelAspectRatio") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(&info->pixel_aspect_ratio);
has_pixel_aspect_ratio = true;
}
} else if (attr_name.compare("screenWindowCenter") == 0) {
if (data.size() >= 8) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(&info->screen_window_center[0]);
tinyexr::swap4(&info->screen_window_center[1]);
has_screen_window_center = true;
}
} else if (attr_name.compare("screenWindowWidth") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(&info->screen_window_width);
has_screen_window_width = true;
}
} else if (attr_name.compare("chunkCount") == 0) {
if (data.size() >= sizeof(int)) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(&info->chunk_count);
}
} else if (attr_name.compare("name") == 0) {
if (!data.empty() && data[0]) {
data.push_back(0);
size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
info->name.resize(len);
info->name.assign(reinterpret_cast<const char*>(&data[0]), len);
has_name = true;
}
} else if (attr_name.compare("type") == 0) {
if (!data.empty() && data[0]) {
data.push_back(0);
size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
info->type.resize(len);
info->type.assign(reinterpret_cast<const char*>(&data[0]), len);
has_type = true;
}
} else {
// Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
        attrib.size = static_cast<int>(data.size());
        if (data.size() > 0) {
          attrib.value = static_cast<unsigned char *>(malloc(data.size()));
          memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
                 data.size());
        } else {
          // Guard against zero-sized attributes: `data.at(0)` would throw.
          attrib.value = NULL;
        }
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header or invalid."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (version->multipart || version->non_image) {
if (!has_name) {
ss_err << "\"name\" attribute not found in the header."
<< std::endl;
}
if (!has_type) {
ss_err << "\"type\" attribute not found in the header."
<< std::endl;
}
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window.min_x = info.display_window.min_x;
exr_header->display_window.min_y = info.display_window.min_y;
exr_header->display_window.max_x = info.display_window.max_x;
exr_header->display_window.max_y = info.display_window.max_y;
exr_header->data_window.min_x = info.data_window.min_x;
exr_header->data_window.min_y = info.data_window.min_y;
exr_header->data_window.max_x = info.data_window.max_x;
exr_header->data_window.max_y = info.data_window.max_y;
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tiled = info.tiled;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
EXRSetNameAttr(exr_header, info.name.c_str());
if (!info.type.empty()) {
if (info.type == "scanlineimage") {
assert(!exr_header->tiled);
} else if (info.type == "tiledimage") {
assert(exr_header->tiled);
} else if (info.type == "deeptile") {
exr_header->non_image = 1;
assert(exr_header->tiled);
} else if (info.type == "deepscanline") {
exr_header->non_image = 1;
assert(!exr_header->tiled);
} else {
assert(false);
}
}
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
// manually add '\0' for safety.
exr_header->channels[c].name[255] = '\0';
exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
exr_header->channels[c].p_linear = info.channels[c].p_linear;
exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
if (exr_header->num_custom_attributes > 0) {
// TODO(syoyo): Report warning when # of attributes exceeds
// `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
}
exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
for (size_t i = 0; i < info.attributes.size(); i++) {
memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
256);
memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
256);
exr_header->custom_attributes[i].size = info.attributes[i].size;
// Just copy pointer
exr_header->custom_attributes[i].value = info.attributes[i].value;
}
} else {
exr_header->custom_attributes = NULL;
}
exr_header->header_len = info.header_len;
}
struct OffsetData {
OffsetData() : num_x_levels(0), num_y_levels(0) {}
std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets;
int num_x_levels;
int num_y_levels;
};
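// offsets is indexed as offsets[level_index][y_tile][x_tile] and holds the
// absolute file offset of each tile; for scanline images only offsets[0][0]
// is used.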
int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) {
switch (tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
return 0;
case TINYEXR_TILE_MIPMAP_LEVELS:
return lx;
case TINYEXR_TILE_RIPMAP_LEVELS:
return lx + ly * num_x_levels;
default:
assert(false);
}
return 0;
}
static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) {
assert(level >= 0);
int b = (int)(1u << (unsigned)level);
int level_size = toplevel_size / b;
if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP && level_size * b < toplevel_size)
level_size += 1;
return std::max(level_size, 1);
}
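// Example: LevelSize(15, 1, mode) yields 15 / 2 = 7 with
// TINYEXR_TILE_ROUND_DOWN and 8 with TINYEXR_TILE_ROUND_UP (since 7 * 2 < 15).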
static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header,
const OffsetData& offset_data,
const std::vector<size_t>& channel_offset_list,
int pixel_data_size,
const unsigned char* head, const size_t size,
std::string* err) {
int num_channels = exr_header->num_channels;
int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels);
int num_y_tiles = (int)offset_data.offsets[level_index].size();
assert(num_y_tiles);
int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
assert(num_x_tiles);
int num_tiles = num_x_tiles * num_y_tiles;
int err_code = TINYEXR_SUCCESS;
enum {
EF_SUCCESS = 0,
EF_INVALID_DATA = 1,
EF_INSUFFICIENT_DATA = 2,
EF_FAILED_TO_DECODE = 4
};
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<unsigned> error_flag(EF_SUCCESS);
#else
unsigned error_flag(EF_SUCCESS);
#endif
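  // Worker threads OR their failure bits into error_flag, so with C++11
  // threads the flag must be atomic (tiles are decoded concurrently). In the
  // OpenMP build the flag is a plain unsigned and the ORs are unsynchronized.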
// Although the spec says : "...the data window is subdivided into an array of smaller rectangles...",
// the IlmImf library allows the dimensions of the tile to be larger (or equal) than the dimensions of the data window.
#if 0
if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) &&
exr_image->level_x == 0 && exr_image->level_y == 0) {
if (err) {
(*err) += "Failed to decode tile data.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
}
#endif
  exr_image->tiles = static_cast<EXRTile*>(
      calloc(static_cast<size_t>(num_tiles), sizeof(EXRTile)));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]()
{
int tile_idx = 0;
while ((tile_idx = tile_count++) < num_tiles) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels,
exr_header->requested_pixel_types, exr_header->tile_size_x,
exr_header->tile_size_y);
int x_tile = tile_idx % num_x_tiles;
int y_tile = tile_idx / num_x_tiles;
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
tinyexr::tinyexr_uint64 offset = offset_data.offsets[level_index][y_tile][x_tile];
if (offset + sizeof(int) * 5 > size) {
// Insufficient data size.
error_flag |= EF_INSUFFICIENT_DATA;
continue;
}
size_t data_size =
size_t(size - (offset + sizeof(int) * 5));
const unsigned char* data_ptr =
reinterpret_cast<const unsigned char*>(head + offset);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(&tile_coordinates[0]);
tinyexr::swap4(&tile_coordinates[1]);
tinyexr::swap4(&tile_coordinates[2]);
tinyexr::swap4(&tile_coordinates[3]);
if (tile_coordinates[2] != exr_image->level_x) {
// Invalid data.
error_flag |= EF_INVALID_DATA;
continue;
}
if (tile_coordinates[3] != exr_image->level_y) {
// Invalid data.
error_flag |= EF_INVALID_DATA;
continue;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(&data_len);
if (data_len < 2 || size_t(data_len) > data_size) {
// Insufficient data size.
error_flag |= EF_INSUFFICIENT_DATA;
continue;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
bool ret = tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order,
exr_image->width, exr_image->height,
tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list);
if (!ret) {
// Failed to decode tile data.
error_flag |= EF_FAILED_TO_DECODE;
}
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
} // num_thread loop
for (auto& t : workers) {
t.join();
}
#else
} // parallel for
#endif
  // Set these fields even on error so that the caller can free the memory
  // allocated above.
exr_image->num_channels = num_channels;
exr_image->num_tiles = static_cast<int>(num_tiles);
if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA;
if (err) {
    if (error_flag & EF_INVALID_DATA) {
      (*err) += "Invalid tile data.\n";
    }
    if (error_flag & EF_INSUFFICIENT_DATA) {
      (*err) += "Insufficient data length.\n";
    }
if (error_flag & EF_FAILED_TO_DECODE) {
(*err) += "Failed to decode tile data.\n";
}
}
return err_code;
}
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const OffsetData& offset_data,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param,
exr_header->custom_attributes,
int(exr_header->num_custom_attributes), err)) {
return TINYEXR_ERROR_INVALID_HEADER;
}
#endif
}
if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
exr_header->data_window.max_y < exr_header->data_window.min_y) {
if (err) {
(*err) += "Invalid data window.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int data_width =
exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
int data_height =
exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
// Do not allow too large data_width and data_height (likely an invalid header).
{
if ((data_width > TINYEXR_DIMENSION_THRESHOLD) || (data_height > TINYEXR_DIMENSION_THRESHOLD)) {
if (err) {
std::stringstream ss;
ss << "data_with or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (exr_header->tiled) {
if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) || (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) {
if (err) {
std::stringstream ss;
ss << "tile with or tile height too large. tile width: " << exr_header->tile_size_x
<< ", "
<< "tile height = " << exr_header->tile_size_y << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
#else
bool invalid_data(false);
#endif
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) {
EXRImage* level_image = NULL;
for (int level = 0; level < offset_data.num_x_levels; ++level) {
if (!level_image) {
level_image = exr_image;
} else {
level_image->next_level = new EXRImage;
InitEXRImage(level_image->next_level);
level_image = level_image->next_level;
}
level_image->width =
LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode);
level_image->height =
LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode);
level_image->level_x = level;
level_image->level_y = level;
int ret = DecodeTiledLevel(level_image, exr_header,
offset_data,
channel_offset_list,
pixel_data_size,
head, size,
err);
if (ret != TINYEXR_SUCCESS) return ret;
}
} else {
EXRImage* level_image = NULL;
for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y)
for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) {
if (!level_image) {
level_image = exr_image;
} else {
level_image->next_level = new EXRImage;
InitEXRImage(level_image->next_level);
level_image = level_image->next_level;
}
level_image->width =
LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode);
level_image->height =
LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode);
level_image->level_x = level_x;
level_image->level_y = level_y;
int ret = DecodeTiledLevel(level_image, exr_header,
offset_data,
channel_offset_list,
pixel_data_size,
head, size,
err);
if (ret != TINYEXR_SUCCESS) return ret;
}
}
} else { // scanline format
// Don't allow too large images (256GB * pixel_data_size or more). Workaround
// for #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown =
sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> y_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_blocks)) {
num_threads = int(num_blocks);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int y = 0;
while ((y = y_count++) < int(num_blocks)) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size =
size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(&line_no);
tinyexr::swap4(&data_len);
if (size_t(data_len) > data_size) {
invalid_data = true;
} else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
// Too large a value: assume the data is invalid.
// 2 << 20 = 2097152 is a heuristic threshold.
invalid_data = true;
} else if (data_len == 0) {
// TODO(syoyo): May be ok to raise the threshold for example
// `data_len < 4`
invalid_data = true;
} else {
// line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window.max_y + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
// overflow check
tinyexr_int64 lno =
static_cast<tinyexr_int64>(line_no) -
static_cast<tinyexr_int64>(exr_header->data_window.min_y);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else {
line_no -= exr_header->data_window.min_y;
}
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(
exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
}
if (invalid_data) {
if (err) {
(*err) += "Invalid data found when decoding pixels.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
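// Fallback for a damaged line-offset table: walk the scanline chunks
// sequentially from `marker` and record each chunk's offset. A chunk starts
// with a 4-byte y coordinate and a 4-byte data length, so the next chunk
// begins data_len + 8 bytes further on.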
static bool ReconstructLineOffsets(
std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
const unsigned char *head, const unsigned char *marker, const size_t size) {
assert(head < marker);
assert(offsets->size() == n);
for (size_t i = 0; i < n; i++) {
size_t offset = static_cast<size_t>(marker - head);
// Offset should not exceed whole EXR file/data size.
if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
return false;
}
int y;
unsigned int data_len;
memcpy(&y, marker, sizeof(int));
memcpy(&data_len, marker + 4, sizeof(unsigned int));
// Swap before validating so the length check also holds on big-endian hosts.
tinyexr::swap4(&y);
tinyexr::swap4(&data_len);
if (data_len >= size) {
return false;
}
(*offsets)[i] = offset;
marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
}
return true;
}
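// Integer base-2 logarithms used to compute mip/rip level counts.
// For example, FloorLog2(5) == 2 and CeilLog2(5) == 3, while both
// return 2 for an exact power of two such as 4.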
static int FloorLog2(unsigned x) {
//
// For x > 0, FloorLog2(x) returns floor(log(x)/log(2)).
//
int y = 0;
while (x > 1) {
y += 1;
x >>= 1u;
}
return y;
}
static int CeilLog2(unsigned x) {
//
// For x > 0, CeilLog2(x) returns ceil(log(x)/log(2)).
//
int y = 0;
int r = 0;
while (x > 1) {
if (x & 1)
r = 1;
y += 1;
x >>= 1u;
}
return y + r;
}
static int RoundLog2(int x, int tile_rounding_mode) {
return (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN) ? FloorLog2(static_cast<unsigned>(x)) : CeilLog2(static_cast<unsigned>(x));
}
static int CalculateNumXLevels(const EXRHeader* exr_header) {
int min_x = exr_header->data_window.min_x;
int max_x = exr_header->data_window.max_x;
int min_y = exr_header->data_window.min_y;
int max_y = exr_header->data_window.max_y;
int num = 0;
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
num = 1;
break;
case TINYEXR_TILE_MIPMAP_LEVELS:
{
int w = max_x - min_x + 1;
int h = max_y - min_y + 1;
num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
{
int w = max_x - min_x + 1;
num = RoundLog2(w, exr_header->tile_rounding_mode) + 1;
}
break;
default:
assert(false);
}
return num;
}
static int CalculateNumYLevels(const EXRHeader* exr_header) {
int min_x = exr_header->data_window.min_x;
int max_x = exr_header->data_window.max_x;
int min_y = exr_header->data_window.min_y;
int max_y = exr_header->data_window.max_y;
int num = 0;
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
num = 1;
break;
case TINYEXR_TILE_MIPMAP_LEVELS:
{
int w = max_x - min_x + 1;
int h = max_y - min_y + 1;
num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
{
int h = max_y - min_y + 1;
num = RoundLog2(h, exr_header->tile_rounding_mode) + 1;
}
break;
default:
assert(false);
}
return num;
}
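// For each level, the tile count is the level size divided by the tile size,
// rounded up: numTiles[i] = ceil(LevelSize(toplevel_size, i, mode) / size).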
static void CalculateNumTiles(std::vector<int>& numTiles,
int toplevel_size,
int size,
int tile_rounding_mode) {
for (unsigned i = 0; i < numTiles.size(); i++) {
int l = LevelSize(toplevel_size, i, tile_rounding_mode);
assert(l <= std::numeric_limits<int>::max() - size + 1);
numTiles[i] = (l + size - 1) / size;
}
}
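// Compute the per-level tile counts in x and y for a tiled part from its
// data window, tile sizes, and level/rounding modes.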
static void PrecalculateTileInfo(std::vector<int>& num_x_tiles,
std::vector<int>& num_y_tiles,
const EXRHeader* exr_header) {
int min_x = exr_header->data_window.min_x;
int max_x = exr_header->data_window.max_x;
int min_y = exr_header->data_window.min_y;
int max_y = exr_header->data_window.max_y;
int num_x_levels = CalculateNumXLevels(exr_header);
int num_y_levels = CalculateNumYLevels(exr_header);
num_x_tiles.resize(num_x_levels);
num_y_tiles.resize(num_y_levels);
CalculateNumTiles(num_x_tiles,
max_x - min_x + 1,
exr_header->tile_size_x,
exr_header->tile_rounding_mode);
CalculateNumTiles(num_y_tiles,
max_y - min_y + 1,
exr_header->tile_size_y,
exr_header->tile_rounding_mode);
}
static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) {
offset_data.offsets.resize(1);
offset_data.offsets[0].resize(1);
offset_data.offsets[0][0].resize(num_blocks);
offset_data.num_x_levels = 1;
offset_data.num_y_levels = 1;
}
// Return sum of tile blocks.
static int InitTileOffsets(OffsetData& offset_data,
const EXRHeader* exr_header,
const std::vector<int>& num_x_tiles,
const std::vector<int>& num_y_tiles) {
int num_tile_blocks = 0;
offset_data.num_x_levels = static_cast<int>(num_x_tiles.size());
offset_data.num_y_levels = static_cast<int>(num_y_tiles.size());
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
case TINYEXR_TILE_MIPMAP_LEVELS:
assert(offset_data.num_x_levels == offset_data.num_y_levels);
offset_data.offsets.resize(offset_data.num_x_levels);
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
offset_data.offsets[l].resize(num_y_tiles[l]);
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
offset_data.offsets[l][dy].resize(num_x_tiles[l]);
num_tile_blocks += num_x_tiles[l];
}
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) * static_cast<size_t>(offset_data.num_y_levels));
for (int ly = 0; ly < offset_data.num_y_levels; ++ly) {
for (int lx = 0; lx < offset_data.num_x_levels; ++lx) {
int l = ly * offset_data.num_x_levels + lx;
offset_data.offsets[l].resize(num_y_tiles[ly]);
for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
offset_data.offsets[l][dy].resize(num_x_tiles[lx]);
num_tile_blocks += num_x_tiles[lx];
}
}
}
break;
default:
assert(false);
}
return num_tile_blocks;
}
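// An offset of zero (or a negative value when reinterpreted as signed) means
// the table entry was never written; such tables must be reconstructed by
// scanning the chunks themselves.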
static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) {
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx)
if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0)
return true;
return false;
}
static bool isValidTile(const EXRHeader* exr_header,
const OffsetData& offset_data,
int dx, int dy, int lx, int ly) {
if (lx < 0 || ly < 0 || dx < 0 || dy < 0) return false;
int num_x_levels = offset_data.num_x_levels;
int num_y_levels = offset_data.num_y_levels;
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
if (lx == 0 &&
ly == 0 &&
offset_data.offsets.size() > 0 &&
offset_data.offsets[0].size() > static_cast<size_t>(dy) &&
offset_data.offsets[0][dy].size() > static_cast<size_t>(dx)) {
return true;
}
break;
case TINYEXR_TILE_MIPMAP_LEVELS:
if (lx < num_x_levels &&
ly < num_y_levels &&
offset_data.offsets.size() > static_cast<size_t>(lx) &&
offset_data.offsets[lx].size() > static_cast<size_t>(dy) &&
offset_data.offsets[lx][dy].size() > static_cast<size_t>(dx)) {
return true;
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
{
size_t idx = static_cast<size_t>(lx) + static_cast<size_t>(ly)* static_cast<size_t>(num_x_levels);
if (lx < num_x_levels &&
ly < num_y_levels &&
(offset_data.offsets.size() > idx) &&
offset_data.offsets[idx].size() > static_cast<size_t>(dy) &&
offset_data.offsets[idx][dy].size() > static_cast<size_t>(dx)) {
return true;
}
}
break;
default:
return false;
}
return false;
}
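// Fallback for a damaged tile-offset table: walk the tile chunks
// sequentially, parse each chunk header (tileX, tileY, levelX, levelY, then
// either a data size or the deep-data sizes), and record the chunk's offset
// at the corresponding table entry.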
static void ReconstructTileOffsets(OffsetData& offset_data,
const EXRHeader* exr_header,
const unsigned char* head, const unsigned char* marker, const size_t /*size*/,
bool isMultiPartFile,
bool isDeep) {
int numXLevels = offset_data.num_x_levels;
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
tinyexr::tinyexr_uint64 tileOffset = marker - head;
if (isMultiPartFile) {
//int partNumber;
marker += sizeof(int);
}
int tileX;
memcpy(&tileX, marker, sizeof(int));
tinyexr::swap4(&tileX);
marker += sizeof(int);
int tileY;
memcpy(&tileY, marker, sizeof(int));
tinyexr::swap4(&tileY);
marker += sizeof(int);
int levelX;
memcpy(&levelX, marker, sizeof(int));
tinyexr::swap4(&levelX);
marker += sizeof(int);
int levelY;
memcpy(&levelY, marker, sizeof(int));
tinyexr::swap4(&levelY);
marker += sizeof(int);
if (isDeep) {
tinyexr::tinyexr_int64 packed_offset_table_size;
memcpy(&packed_offset_table_size, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_offset_table_size));
marker += sizeof(tinyexr::tinyexr_int64);
tinyexr::tinyexr_int64 packed_sample_size;
memcpy(&packed_sample_size, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_sample_size));
marker += sizeof(tinyexr::tinyexr_int64);
// next Int64 is unpacked sample size - skip that too
marker += packed_offset_table_size + packed_sample_size + 8;
} else {
int dataSize;
memcpy(&dataSize, marker, sizeof(int));
tinyexr::swap4(&dataSize);
marker += sizeof(int);
marker += dataSize;
}
if (!isValidTile(exr_header, offset_data,
tileX, tileY, levelX, levelY))
return;
int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels);
offset_data.offsets[level_idx][tileY][tileX] = tileOffset;
}
}
}
}
// On success, the `marker` output argument is advanced past the offset table.
static int ReadOffsets(OffsetData& offset_data,
const unsigned char* head,
const unsigned char*& marker,
const size_t size,
const char** err) {
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
tinyexr::tinyexr_uint64 offset;
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offset_data.offsets[l][dy][dx] = offset;
}
}
}
return TINYEXR_SUCCESS;
}
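// Top-level decoder: validates the data window, reads (or, if invalid,
// reconstructs) the chunk offset table, then decodes all chunks through
// DecodeChunk(). On failure, partially decoded image data is released with
// FreeEXRImage().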
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
exr_header->data_window.max_x - exr_header->data_window.min_x ==
std::numeric_limits<int>::max()) {
// Issue 63
tinyexr::SetErrorMessage("Invalid data width value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
int data_width =
exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
if (exr_header->data_window.max_y < exr_header->data_window.min_y ||
exr_header->data_window.max_y - exr_header->data_window.min_y ==
std::numeric_limits<int>::max()) {
tinyexr::SetErrorMessage("Invalid data height value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
int data_height =
exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
// Do not allow too large data_width and data_height (likely an invalid header).
{
if (data_width > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("data width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (data_height > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("data height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
if (exr_header->tiled) {
if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("tile width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("tile height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Read offset tables.
OffsetData offset_data;
size_t num_blocks = 0;
// For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header.
// If chunk_count > 0 then chunk_count must be equal to the calculated tile count.
if (exr_header->tiled) {
{
std::vector<int> num_x_tiles, num_y_tiles;
PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header);
num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles);
if (exr_header->chunk_count > 0) {
if (exr_header->chunk_count != static_cast<int>(num_blocks)) {
tinyexr::SetErrorMessage("Invalid offset table size.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
int ret = ReadOffsets(offset_data, head, marker, size, err);
if (ret != TINYEXR_SUCCESS) return ret;
if (IsAnyOffsetsAreInvalid(offset_data)) {
ReconstructTileOffsets(offset_data, exr_header,
head, marker, size,
exr_header->multipart, exr_header->non_image);
}
} else if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
InitSingleResolutionOffsets(offset_data, num_blocks);
} else {
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
InitSingleResolutionOffsets(offset_data, num_blocks);
}
if (!exr_header->tiled) {
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
// Issue #81
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
// If line offsets are invalid, we try to reconstruct it.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] <= 0) {
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
tinyexr::SetErrorMessage(
"Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
}
{
std::string e;
int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
#if 1
FreeEXRImage(exr_image);
#else
// release memory(if exists)
if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
if (exr_image->images[c]) {
free(exr_image->images[c]);
exr_image->images[c] = NULL;
}
}
free(exr_image->images);
exr_image->images = NULL;
}
#endif
}
return ret;
}
}
static void GetLayers(const EXRHeader &exr_header,
std::vector<std::string> &layer_names) {
// Naive implementation: walk all channel names, split each at its last
// period, and collect the unique layer prefixes (e.g. "diffuse" from
// "diffuse.R").
layer_names.clear();
for (int c = 0; c < exr_header.num_channels; c++) {
std::string full_name(exr_header.channels[c].name);
const size_t pos = full_name.find_last_of('.');
if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) {
full_name.erase(pos);
if (std::find(layer_names.begin(), layer_names.end(), full_name) ==
layer_names.end())
layer_names.push_back(full_name);
}
}
}
struct LayerChannel {
explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {}
size_t index;
std::string name;
};
static void ChannelsInLayer(const EXRHeader &exr_header,
const std::string layer_name,
std::vector<LayerChannel> &channels) {
channels.clear();
for (int c = 0; c < exr_header.num_channels; c++) {
std::string ch_name(exr_header.channels[c].name);
if (layer_name.empty()) {
const size_t pos = ch_name.find_last_of('.');
if (pos != std::string::npos && pos < ch_name.size()) {
ch_name = ch_name.substr(pos + 1);
}
} else {
const size_t pos = ch_name.find(layer_name + '.');
if (pos == std::string::npos) continue;
if (pos == 0) {
ch_name = ch_name.substr(layer_name.size() + 1);
}
}
LayerChannel ch(size_t(c), ch_name);
channels.push_back(ch);
}
}
} // namespace tinyexr
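// Note: the returned layer_names array and its strings are allocated with
// malloc/strdup, so the caller is responsible for freeing each entry and the
// array itself.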
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
const char **err) {
EXRVersion exr_version;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
std::vector<std::string> layer_vec;
tinyexr::GetLayers(exr_header, layer_vec);
(*num_layers) = int(layer_vec.size());
(*layer_names) = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
(*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
(*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
}
FreeEXRHeader(&exr_header);
return TINYEXR_SUCCESS;
}
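// Loads the default (unnamed) layer as 4-channel RGBA float data.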
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
return LoadEXRWithLayer(out_rgba, width, height, filename,
/* layername */ NULL, err);
}
int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layername,
const char **err) {
if (out_rgba == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to open EXR file or read version info from EXR file. code("
<< ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
// TODO: Probably limit loading to layers (channels) selected by layer index
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
std::vector<std::string> layer_names;
tinyexr::GetLayers(exr_header, layer_names);
std::vector<tinyexr::LayerChannel> channels;
tinyexr::ChannelsInLayer(
exr_header, layername == NULL ? "" : std::string(layername), channels);
if (channels.size() < 1) {
tinyexr::SetErrorMessage("Layer Not Found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_LAYER_NOT_FOUND;
}
size_t ch_count = channels.size() < 4 ? channels.size() : 4;
for (size_t c = 0; c < ch_count; c++) {
const tinyexr::LayerChannel &ch = channels[c];
if (ch.name == "R") {
idxR = int(ch.index);
} else if (ch.name == "G") {
idxG = int(ch.index);
} else if (ch.name == "B") {
idxB = int(ch.index);
} else if (ch.name == "A") {
idxA = int(ch.index);
}
}
if (channels.size() == 1) {
int chIdx = int(channels.front().index);
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii = exr_image.tiles[it].offset_x *
static_cast<int>(exr_header.tile_size_x) +
i;
const int jj = exr_image.tiles[it].offset_y *
static_cast<int>(exr_header.tile_size_y) +
j;
const int idx = ii + jj * static_cast<int>(exr_image.width);
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val =
reinterpret_cast<float **>(exr_image.images)[chIdx][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// Assume RGB(A)
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_header == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument. `memory` or `exr_header` argument is null in "
"ParseEXRHeaderFromMemory()",
err);
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err && !err_str.empty()) {
tinyexr::SetErrorMessage(err_str, err);
}
}
ConvertHeader(exr_header, info);
exr_header->multipart = version->multipart ? 1 : 0;
exr_header->non_image = version->non_image ? 1 : 0;
return ret;
}
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err) {
if (out_rgba == NULL || memory == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to parse EXR version. code(" << ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
InitEXRImage(&exr_image);
ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
// TODO(syoyo): Refactor removing same code as used in LoadEXR().
if (exr_header.num_channels == 1) {
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[0][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// TODO(syoyo): Support non RGBA image.
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++)
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
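// Reads the whole file into a memory buffer (no mmap yet) and forwards to
// LoadEXRImageFromMemory().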
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
// TODO(syoyo): return wfopen_s error code
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize < 16) {
tinyexr::SetErrorMessage("File size too short " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
err);
}
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *memory, const size_t size,
const char **err) {
if (exr_image == NULL || memory == NULL ||
(size < tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
const unsigned char *head = memory;
const unsigned char *marker = reinterpret_cast<const unsigned char *>(
memory + exr_header->header_len +
8); // +8 for magic number + version header.
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
err);
}
namespace tinyexr
{
// out_data must be allocated initially with the block-header size
// of the current image(-part) type
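// Within the staging buffer each scanline is laid out channel-planar in
// header order: all of channel 0's samples for that line, then channel 1's,
// and so on. The whole buffer is then compressed as a single block.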
static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data,
const unsigned char* const* images,
int compression_type,
int /*line_order*/,
int width, // for tiled : tile.width
int /*height*/, // for tiled : header.tile_size_y
int x_stride, // for tiled : header.tile_size_x
int line_no, // for tiled : 0
int num_lines, // for tiled : tile.height
size_t pixel_data_size,
const std::vector<ChannelInfo>& channels,
const std::vector<size_t>& channel_offset_list,
const void* compression_param = 0) // zfp compression param
{
size_t buf_size = static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
static_cast<size_t>(pixel_data_size);
//int last2bit = (buf_size & 3);
// buf_size must be multiple of four
//if(last2bit) buf_size += 4 - last2bit;
std::vector<unsigned char> buf(buf_size);
size_t start_y = static_cast<size_t>(line_no);
for (size_t c = 0; c < channels.size(); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<const unsigned short * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(&f32.f);
// line_ptr[x] = f32.f;
tinyexr::cpy4(line_ptr + x, &(f32.f));
}
}
} else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
unsigned short val = reinterpret_cast<const unsigned short * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::swap2(&val);
// line_ptr[x] = val;
tinyexr::cpy2(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<const float * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// line_ptr[x] = h16.u;
tinyexr::cpy2(line_ptr + x, &(h16.u));
}
}
} else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
float val = reinterpret_cast<const float * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
unsigned int val = reinterpret_cast<const unsigned int * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
}
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
out_data.insert(out_data.end(), buf.begin(), buf.end());
} else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
8192 + static_cast<unsigned int>(
2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, width, num_lines);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = outSize;
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
assert(0);
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
const ZFPCompressionParam* zfp_compression_param = reinterpret_cast<const ZFPCompressionParam*>(compression_param);
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
width, num_lines, static_cast<int>(channels.size()), *zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = outSize;
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
(void)compression_param;
assert(0);
#endif
} else {
assert(0);
return false;
}
return true;
}
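// Encode every tile of one mip/rip level. Each output block is prefixed with
// five 4-byte ints (tileX, tileY, levelX, levelY, data size), matching the
// tile chunk layout parsed on the decode side.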
static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header,
const std::vector<tinyexr::ChannelInfo>& channels,
std::vector<std::vector<unsigned char> >& data_list,
size_t start_index, // for data_list
int num_x_tiles, int num_y_tiles,
const std::vector<size_t>& channel_offset_list,
int pixel_data_size,
const void* compression_param, // must be set if zfp compression is enabled
std::string* err) {
int num_tiles = num_x_tiles * num_y_tiles;
assert(num_tiles == level_image->num_tiles);
if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) &&
level_image->level_x == 0 && level_image->level_y == 0) {
if (err) {
(*err) += "Failed to encode tile data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
#else
bool invalid_data(false);
#endif
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int i = 0;
while ((i = tile_count++) < num_tiles) {
#else
// Use a signed int since some OpenMP compilers don't allow unsigned loop
// variables in `parallel for`.
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_tiles; i++) {
#endif
size_t tile_idx = static_cast<size_t>(i);
size_t data_idx = tile_idx + start_index;
int x_tile = i % num_x_tiles;
int y_tile = i / num_x_tiles;
EXRTile& tile = level_image->tiles[tile_idx];
const unsigned char* const* images =
static_cast<const unsigned char* const*>(tile.images);
data_list[data_idx].resize(5*sizeof(int));
size_t data_header_size = data_list[data_idx].size();
bool ret = EncodePixelData(data_list[data_idx],
images,
exr_header->compression_type,
0, // increasing y
tile.width,
exr_header->tile_size_y,
exr_header->tile_size_x,
0,
tile.height,
pixel_data_size,
channels,
channel_offset_list,
compression_param);
if (!ret) {
invalid_data = true;
continue;
}
assert(data_list[data_idx].size() > data_header_size);
int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size);
// Block header: tileX, tileY, levelX, levelY, then data_len (five 4-byte ints).
memcpy(&data_list[data_idx][0], &x_tile, sizeof(int));
memcpy(&data_list[data_idx][4], &y_tile, sizeof(int));
memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int));
memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int));
memcpy(&data_list[data_idx][16], &data_len, sizeof(int));
swap4(reinterpret_cast<int*>(&data_list[data_idx][0]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][4]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][8]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][12]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][16]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
if (invalid_data) {
if (err) {
(*err) += "Failed to encode tile data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
return TINYEXR_SUCCESS;
}
static int NumScanlines(int compression_type) {
int num_scanlines = 1;
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanlines = 16;
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanlines = 32;
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanlines = 16;
}
return num_scanlines;
}
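// Serialize all blocks of one part: compute the per-channel byte layout,
// encode every scanline block or tile into `data_list`, and fill
// `offset_data` with absolute, already byte-swapped chunk offsets.
// `total_size` receives the file offset just past the last chunk.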
static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header,
const std::vector<ChannelInfo>& channels,
int num_blocks,
tinyexr_uint64 chunk_offset, // starting offset of current chunk
bool is_multipart,
OffsetData& offset_data, // output block offsets, must be initialized
std::vector<std::vector<unsigned char> >& data_list, // output
tinyexr_uint64& total_size, // output: ending offset of current chunk
std::string* err) {
int num_scanlines = NumScanlines(exr_header->compression_type);
data_list.resize(num_blocks);
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
{
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (channels[c].requested_pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
}
const void* compression_param = 0;
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
// Use ZFP compression parameter from custom attributes(if such a parameter
// exists)
{
std::string e;
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes, &e);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
compression_param = &zfp_compression_param;
}
#endif
tinyexr_uint64 offset = chunk_offset;
tinyexr_uint64 doffset = is_multipart ? 4u : 0u;
if (exr_image->tiles) {
const EXRImage* level_image = exr_image;
size_t block_idx = 0;
tinyexr::tinyexr_uint64 block_data_size = 0;
int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
offset_data.num_x_levels : (offset_data.num_x_levels * offset_data.num_y_levels);
for (int level_index = 0; level_index < num_levels; ++level_index) {
if (!level_image) {
if (err) {
(*err) += "Invalid number of tiled levels for EncodeChunk\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y,
exr_header->tile_level_mode, offset_data.num_x_levels);
if (level_index_from_image != level_index) {
if (err) {
(*err) += "Incorrect level ordering in tiled image\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int num_y_tiles = (int)offset_data.offsets[level_index].size();
assert(num_y_tiles);
int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
assert(num_x_tiles);
std::string e;
int ret = EncodeTiledLevel(level_image,
exr_header,
channels,
data_list,
block_idx,
num_x_tiles,
num_y_tiles,
channel_offset_list,
pixel_data_size,
compression_param,
&e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty() && err) {
(*err) += e;
}
return ret;
}
for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j)
for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) {
offset_data.offsets[level_index][j][i] = offset;
swap8(reinterpret_cast<tinyexr_uint64*>(&offset_data.offsets[level_index][j][i]));
offset += data_list[block_idx].size() + doffset;
block_data_size += data_list[block_idx].size();
++block_idx;
}
level_image = level_image->next_level;
}
assert(static_cast<int>(block_idx) == num_blocks);
total_size = offset;
} else { // scanlines
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
std::vector<std::thread> workers;
std::atomic<int> block_count(0);
int num_threads = std::min(std::max(1, int(std::thread::hardware_concurrency())), num_blocks);
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int i = 0;
while ((i = block_count++) < num_blocks) {
#else
bool invalid_data(false);
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
#endif
int start_y = num_scanlines * i;
int end_y = (std::min)(num_scanlines * (i + 1), exr_image->height);
int num_lines = end_y - start_y;
const unsigned char* const* images =
static_cast<const unsigned char* const*>(exr_image->images);
data_list[i].resize(2*sizeof(int));
size_t data_header_size = data_list[i].size();
bool ret = EncodePixelData(data_list[i],
images,
exr_header->compression_type,
0, // increasing y
exr_image->width,
exr_image->height,
exr_image->width,
start_y,
num_lines,
pixel_data_size,
channels,
channel_offset_list,
compression_param);
if (!ret) {
invalid_data = true;
continue; // "break" cannot be used with OpenMP
}
assert(data_list[i].size() > data_header_size);
int data_len = static_cast<int>(data_list[i].size() - data_header_size);
memcpy(&data_list[i][0], &start_y, sizeof(int));
memcpy(&data_list[i][4], &data_len, sizeof(int));
swap4(reinterpret_cast<int*>(&data_list[i][0]));
swap4(reinterpret_cast<int*>(&data_list[i][4]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
if (invalid_data) {
if (err) {
(*err) += "Failed to encode scanline data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size() + doffset;
}
total_size = static_cast<size_t>(offset);
}
return TINYEXR_SUCCESS;
}
// can save a single or multi-part image (no deep* formats)
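// On any validation failure this returns 0 and reports the problem via *err.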
static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images,
const EXRHeader** exr_headers,
unsigned int num_parts,
unsigned char** memory_out, const char** err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory_out == NULL) {
SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
err);
return 0;
}
{
for (unsigned int i = 0; i < num_parts; ++i) {
if (exr_headers[i]->compression_type < 0) {
SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
err);
return 0;
}
#if !TINYEXR_USE_PIZ
if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
SetErrorMessage("PIZ compression is not supported in this build",
err);
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
SetErrorMessage("ZFP compression is not supported in this build",
err);
return 0;
}
#else
for (int c = 0; c < exr_headers[i]->num_channels; ++c) {
if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) {
SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
err);
return 0;
}
}
#endif
}
}
std::vector<unsigned char> memory;
// Header
{
const char header[] = { 0x76, 0x2f, 0x31, 0x01 };
memory.insert(memory.end(), header, header + 4);
}
// Version
// using value from the first header
int long_name = exr_headers[0]->long_name;
{
char marker[] = { 2, 0, 0, 0 };
/* @todo
if (exr_header->non_image) {
marker[1] |= 0x8;
}
*/
// tiled
if (num_parts == 1 && exr_images[0].tiles) {
marker[1] |= 0x2;
}
// long_name
if (long_name) {
marker[1] |= 0x4;
}
// multipart
if (num_parts > 1) {
marker[1] |= 0x10;
}
memory.insert(memory.end(), marker, marker + 4);
}
int total_chunk_count = 0;
std::vector<int> chunk_count(num_parts);
std::vector<OffsetData> offset_data(num_parts);
for (unsigned int i = 0; i < num_parts; ++i) {
if (!exr_images[i].tiles) {
int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
chunk_count[i] =
(exr_images[i].height + num_scanlines - 1) / num_scanlines;
InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
total_chunk_count += chunk_count[i];
} else {
{
std::vector<int> num_x_tiles, num_y_tiles;
PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
chunk_count[i] =
InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles, num_y_tiles);
total_chunk_count += chunk_count[i];
}
}
}
// Write attributes to memory buffer.
std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts);
{
std::set<std::string> partnames;
for (unsigned int i = 0; i < num_parts; ++i) {
//channels
{
std::vector<unsigned char> data;
for (int c = 0; c < exr_headers[i]->num_channels; c++) {
tinyexr::ChannelInfo info;
info.p_linear = 0;
info.pixel_type = exr_headers[i]->pixel_types[c];
info.requested_pixel_type = exr_headers[i]->requested_pixel_types[c];
info.x_sampling = 1;
info.y_sampling = 1;
info.name = std::string(exr_headers[i]->channels[c].name);
channels[i].push_back(info);
}
tinyexr::WriteChannelInfo(data, channels[i]);
tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
static_cast<int>(data.size()));
}
{
int comp = exr_headers[i]->compression_type;
swap4(&comp);
WriteAttributeToMemory(
&memory, "compression", "compression",
reinterpret_cast<const unsigned char*>(&comp), 1);
}
{
int data[4] = { 0, 0, exr_images[i].width - 1, exr_images[i].height - 1 };
swap4(&data[0]);
swap4(&data[1]);
swap4(&data[2]);
swap4(&data[3]);
WriteAttributeToMemory(
&memory, "dataWindow", "box2i",
reinterpret_cast<const unsigned char*>(data), sizeof(int) * 4);
int data0[4] = { 0, 0, exr_images[0].width - 1, exr_images[0].height - 1 };
swap4(&data0[0]);
swap4(&data0[1]);
swap4(&data0[2]);
swap4(&data0[3]);
// Note: must be the same across parts (currently, using value from the first header)
WriteAttributeToMemory(
&memory, "displayWindow", "box2i",
reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4);
}
{
unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
&line_order, 1);
}
{
// Note: must be the same across parts
float aspectRatio = 1.0f;
swap4(&aspectRatio);
WriteAttributeToMemory(
&memory, "pixelAspectRatio", "float",
reinterpret_cast<const unsigned char*>(&aspectRatio), sizeof(float));
}
{
float center[2] = { 0.0f, 0.0f };
swap4(&center[0]);
swap4(&center[1]);
WriteAttributeToMemory(
&memory, "screenWindowCenter", "v2f",
reinterpret_cast<const unsigned char*>(center), 2 * sizeof(float));
}
{
float w = 1.0f;
swap4(&w);
WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
reinterpret_cast<const unsigned char*>(&w),
sizeof(float));
}
if (exr_images[i].tiles) {
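// tiledesc attribute layout: 4-byte xSize, 4-byte ySize, 1 mode byte (level mode in the low bits, rounding mode in bit 4) = 9 bytes.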
unsigned char tile_mode = static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3);
if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u);
//unsigned char data[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
unsigned int datai[3] = { 0, 0, 0 };
unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]);
datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x);
datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y);
data[8] = tile_mode;
swap4(reinterpret_cast<unsigned int*>(&data[0]));
swap4(reinterpret_cast<unsigned int*>(&data[4]));
WriteAttributeToMemory(
&memory, "tiles", "tiledesc",
reinterpret_cast<const unsigned char*>(data), 9);
}
// must be present for multi-part files, according to the spec.
if (num_parts > 1) {
// name
{
size_t len = 0;
if ((len = strlen(exr_headers[i]->name)) > 0) {
partnames.insert(std::string(exr_headers[i]->name));
if (partnames.size() != i + 1) {
SetErrorMessage("'name' attributes must be unique for a multi-part file", err);
return 0;
}
WriteAttributeToMemory(
&memory, "name", "string",
reinterpret_cast<const unsigned char*>(exr_headers[i]->name),
static_cast<int>(len));
} else {
SetErrorMessage("Invalid 'name' attribute for a multi-part file", err);
return 0;
}
}
// type
{
const char* type = "scanlineimage";
if (exr_images[i].tiles) type = "tiledimage";
WriteAttributeToMemory(
&memory, "type", "string",
reinterpret_cast<const unsigned char*>(type),
static_cast<int>(strlen(type)));
}
// chunkCount
{
WriteAttributeToMemory(
&memory, "chunkCount", "int",
reinterpret_cast<const unsigned char*>(&chunk_count[i]),
4);
}
}
// Custom attributes
if (exr_headers[i]->num_custom_attributes > 0) {
for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) {
tinyexr::WriteAttributeToMemory(
&memory, exr_headers[i]->custom_attributes[j].name,
exr_headers[i]->custom_attributes[j].type,
reinterpret_cast<const unsigned char*>(
exr_headers[i]->custom_attributes[j].value),
exr_headers[i]->custom_attributes[j].size);
}
}
{ // end of header
memory.push_back(0);
}
}
}
if (num_parts > 1) {
// end of header list
memory.push_back(0);
}
tinyexr_uint64 chunk_offset = memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64);
tinyexr_uint64 total_size = 0;
std::vector< std::vector< std::vector<unsigned char> > > data_lists(num_parts);
for (unsigned int i = 0; i < num_parts; ++i) {
std::string e;
int ret = EncodeChunk(&exr_images[i], exr_headers[i],
channels[i],
chunk_count[i],
// starting offset of current chunk after part-number
chunk_offset,
num_parts > 1,
offset_data[i], // output: block offsets, must be initialized
data_lists[i], // output
total_size, // output
&e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
return 0;
}
chunk_offset = total_size;
}
// Allocating required memory
if (total_size == 0) { // something went wrong
tinyexr::SetErrorMessage("Output memory size is zero", err);
return 0;
}
(*memory_out) = static_cast<unsigned char*>(malloc(total_size));
// Writing header
memcpy((*memory_out), &memory[0], memory.size());
unsigned char* memory_ptr = *memory_out + memory.size();
size_t sum = memory.size();
// Writing offset data for chunks
for (unsigned int i = 0; i < num_parts; ++i) {
if (exr_images[i].tiles) {
const EXRImage* level_image = &exr_images[i];
int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
offset_data[i].num_x_levels : (offset_data[i].num_x_levels * offset_data[i].num_y_levels);
for (int level_index = 0; level_index < num_levels; ++level_index) {
for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) {
size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size();
sum += num_bytes;
assert(sum <= total_size);
memcpy(memory_ptr,
reinterpret_cast<unsigned char*>(&offset_data[i].offsets[level_index][j][0]),
num_bytes);
memory_ptr += num_bytes;
}
level_image = level_image->next_level;
}
} else {
size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(chunk_count[i]);
sum += num_bytes;
assert(sum <= total_size);
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data[i].offsets[0][0];
memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]), num_bytes);
memory_ptr += num_bytes;
}
}
// Writing chunk data
for (unsigned int i = 0; i < num_parts; ++i) {
for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) {
if (num_parts > 1) {
sum += 4;
assert(sum <= total_size);
unsigned int part_number = i;
swap4(&part_number);
memcpy(memory_ptr, &part_number, 4);
memory_ptr += 4;
}
sum += data_lists[i][j].size();
assert(sum <= total_size);
memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size());
memory_ptr += data_lists[i][j].size();
}
}
assert(sum == total_size);
return total_size; // OK
}
} // tinyexr
size_t SaveEXRImageToMemory(const EXRImage* exr_image,
const EXRHeader* exr_header,
unsigned char** memory_out, const char** err) {
return tinyexr::SaveEXRNPartImageToMemory(exr_image, &exr_header, 1, memory_out, err);
}
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL || filename == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "wb");
#endif
#else
fp = fopen(filename, "wb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
if (mem_size == 0) {
fclose(fp);
return TINYEXR_ERROR_SERIALZATION_FAILED;
}
size_t written_size = 0;
if ((mem_size > 0) && mem) {
written_size = fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
if (written_size != mem_size) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
return TINYEXR_SUCCESS;
}
size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images,
const EXRHeader** exr_headers,
unsigned int num_parts,
unsigned char** memory_out, const char** err) {
if (exr_images == NULL || exr_headers == NULL || num_parts < 2 ||
memory_out == NULL) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
err);
return 0;
}
return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers, num_parts, memory_out, err);
}
int SaveEXRMultipartImageToFile(const EXRImage* exr_images,
const EXRHeader** exr_headers,
unsigned int num_parts,
const char* filename,
const char** err) {
if (exr_images == NULL || exr_headers == NULL || num_parts < 2) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "wb");
#endif
#else
fp = fopen(filename, "wb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers, num_parts, &mem, err);
if (mem_size == 0) {
fclose(fp);
return TINYEXR_ERROR_SERIALZATION_FAILED;
}
size_t written_size = 0;
if ((mem_size > 0) && mem) {
written_size = fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
if (written_size != mem_size) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
return TINYEXR_SUCCESS;
}
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
tinyexr::SetErrorMessage("Invalid magic number", err);
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
// ver 2.0, scanline, deep bit on(0x800)
// must be [2, 8, 0, 0]
if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
tinyexr::SetErrorMessage("Unsupported version or scanline", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
std::stringstream ss;
ss << "Failed to parse attribute\n";
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
std::stringstream ss;
ss << "Unsupported compression type : " << compression_type;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
tinyexr::SetErrorMessage("Failed to parse channel info", err);
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
tinyexr::SetErrorMessage("Invalid channels format", err);
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(&dx);
tinyexr::swap4(&dy);
tinyexr::swap4(&dw);
tinyexr::swap4(&dh);
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(&x);
tinyexr::swap4(&y);
tinyexr::swap4(&w);
tinyexr::swap4(&h);
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
std::vector<float> image(
static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
tinyexr::SetErrorMessage("Unsupported compression format", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
for (int c = 0; c < num_channels; c++) {
deep_image->image[c] = static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(data_height)));
// Per-scanline sample arrays (image[c][y]) are allocated later, while decoding each block.
}
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(&line_no);
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
&dstLen, data_ptr + 28,
static_cast<unsigned long>(packedOffsetTableSize))) {
return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory
//
//
// pixel data is stored as image[channels][pixel_samples]
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui;
unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
tinyexr::cpy4(&ui, src_ptr);
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::cpy2(&(f16.u), src_ptr);
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f;
const float *src_ptr = reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
tinyexr::cpy4(&f, src_ptr);
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
void InitEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return;
}
exr_image->width = 0;
exr_image->height = 0;
exr_image->num_channels = 0;
exr_image->images = NULL;
exr_image->tiles = NULL;
exr_image->next_level = NULL;
exr_image->level_x = 0;
exr_image->level_y = 0;
exr_image->num_tiles = 0;
}
void FreeEXRErrorMessage(const char *msg) {
if (msg) {
free(reinterpret_cast<void *>(const_cast<char *>(msg)));
}
return;
}
void InitEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return;
}
memset(exr_header, 0, sizeof(EXRHeader));
}
int FreeEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->channels) {
free(exr_header->channels);
}
if (exr_header->pixel_types) {
free(exr_header->pixel_types);
}
if (exr_header->requested_pixel_types) {
free(exr_header->requested_pixel_types);
}
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
if (exr_header->custom_attributes[i].value) {
free(exr_header->custom_attributes[i].value);
}
}
if (exr_header->custom_attributes) {
free(exr_header->custom_attributes);
}
EXRSetNameAttr(exr_header, NULL);
return TINYEXR_SUCCESS;
}
void EXRSetNameAttr(EXRHeader* exr_header, const char* name) {
if (exr_header == NULL) {
return;
}
memset(exr_header->name, 0, 256);
if (name != NULL) {
size_t len = std::min(strlen(name), (size_t)255);
if (len) {
memcpy(exr_header->name, name, len);
}
}
}
int EXRNumLevels(const EXRImage* exr_image) {
if (exr_image == NULL) return 0;
if(exr_image->images) return 1; // scanlines
int levels = 1;
const EXRImage* level_image = exr_image;
while((level_image = level_image->next_level)) ++levels;
return levels;
}
int FreeEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_image->next_level) {
FreeEXRImage(exr_image->next_level);
delete exr_image->next_level;
}
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->images && exr_image->images[i]) {
free(exr_image->images[i]);
}
}
if (exr_image->images) {
free(exr_image->images);
}
if (exr_image->tiles) {
for (int tid = 0; tid < exr_image->num_tiles; tid++) {
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
free(exr_image->tiles[tid].images[i]);
}
}
if (exr_image->tiles[tid].images) {
free(exr_image->tiles[tid].images);
}
}
free(exr_image->tiles);
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_header == NULL || exr_version == NULL || filename == NULL) {
tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_INVALID_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
err);
}
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
int *num_headers,
const EXRVersion *exr_version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
exr_version == NULL) {
// Invalid argument
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Data size too short", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
std::vector<tinyexr::HeaderInfo> infos;
for (;;) {
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
bool empty_header = false;
int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage(err_str, err);
return ret;
}
if (empty_header) {
marker += 1; // skip '\0'
break;
}
// `chunkCount` must exist in the header.
if (info.chunk_count == 0) {
tinyexr::SetErrorMessage(
"`chunkCount' attribute is not found in the header.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
infos.push_back(info);
// move to next header.
marker += info.header_len;
size -= info.header_len;
}
// allocate memory for EXRHeader and create array of EXRHeader pointers.
(*exr_headers) =
static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
for (size_t i = 0; i < infos.size(); i++) {
EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
memset(exr_header, 0, sizeof(EXRHeader));
ConvertHeader(exr_header, infos[i]);
exr_header->multipart = exr_version->multipart ? 1 : 0;
(*exr_headers)[i] = exr_header;
}
(*num_headers) = static_cast<int>(infos.size());
return TINYEXR_SUCCESS;
}
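// The returned header array and each EXRHeader in it are heap-allocated above,
// so a caller is expected to release them roughly as follows (sketch;
// `headers` and `num_headers` are the caller's variables):
//
//   for (int i = 0; i < num_headers; i++) {
//     FreeEXRHeader(headers[i]);
//     free(headers[i]);
//   }
//   free(headers);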
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
filename == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_INVALID_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRMultipartHeaderFromMemory(
exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
size_t size) {
if (version == NULL || memory == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory;
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
version->tiled = false;
version->long_name = false;
version->non_image = false;
version->multipart = false;
// Parse version header.
{
// must be 2
if (marker[0] != 2) {
return TINYEXR_ERROR_INVALID_EXR_VERSION;
}
version->version = 2;
if (marker[1] & 0x2) { // 9th bit
version->tiled = true;
}
if (marker[1] & 0x4) { // 10th bit
version->long_name = true;
}
if (marker[1] & 0x8) { // 11th bit
version->non_image = true; // (deep image)
}
if (marker[1] & 0x10) { // 12th bit
version->multipart = true;
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
if (filename == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (err != 0) {
// TODO(syoyo): return wfopen_s error code
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t file_size;
// Compute size
fseek(fp, 0, SEEK_END);
file_size = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (file_size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
unsigned char buf[tinyexr::kEXRVersionSize];
size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
fclose(fp);
if (ret != tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromMemory()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// compute total header size.
size_t total_header_size = 0;
for (unsigned int i = 0; i < num_parts; i++) {
if (exr_headers[i]->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
total_header_size += exr_headers[i]->header_len;
}
const char *marker = reinterpret_cast<const char *>(
memory + total_header_size + 4 +
4); // +8 for magic number and version header.
marker += 1; // Skip empty header.
// NOTE 1:
// In a multipart image, there is a 'part number' before each chunk's data:
// 4 bytes : part number
// 4+ bytes: chunk
//
// NOTE 2:
// The EXR spec says 'part number' is an 'unsigned long', but it is actually
// an 'unsigned int' (4 bytes) in the OpenEXR implementation...
// http://www.openexr.com/openexrfilelayout.pdf
// Load chunk offset table.
std::vector<tinyexr::OffsetData> chunk_offset_table_list;
chunk_offset_table_list.reserve(num_parts);
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1);
tinyexr::OffsetData& offset_data = chunk_offset_table_list.back();
if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count);
std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0];
for (size_t c = 0; c < offset_table.size(); c++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, 8);
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_table[c] = offset + 4; // +4 to skip 'part number'
marker += 8;
}
} else {
{
std::vector<int> num_x_tiles, num_y_tiles;
tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles);
if (num_blocks != exr_headers[i]->chunk_count) {
tinyexr::SetErrorMessage("Invalid offset table size.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_data.offsets[l][dy][dx] = offset + 4; // +4 to skip 'part number'
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
}
}
}
}
}
// Decode image.
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
tinyexr::OffsetData &offset_data = chunk_offset_table_list[i];
// First, check that the 'part number' of each chunk is identical to 'i'
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
const unsigned char *part_number_addr =
memory + offset_data.offsets[l][dy][dx] - 4; // -4 to move to 'part number' field.
unsigned int part_no;
memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
tinyexr::swap4(&part_no);
if (part_no != i) {
tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
std::string e;
int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data,
memory, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
return ret;
}
}
return TINYEXR_SUCCESS;
}
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts, const char *filename,
const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
&buf.at(0), filesize, err);
}
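// Typical multi-part loading flow (sketch; error handling elided and `filename`
// assumed to be supplied by the caller):
//
//   EXRVersion version;
//   ParseEXRVersionFromFile(&version, filename);  // version.multipart should be true
//   EXRHeader **headers = NULL;
//   int num_parts = 0;
//   const char *err = NULL;
//   ParseEXRMultipartHeaderFromFile(&headers, &num_parts, &version, filename, &err);
//   std::vector<EXRImage> images(static_cast<size_t>(num_parts));
//   for (int i = 0; i < num_parts; i++) InitEXRImage(&images[i]);
//   LoadEXRMultipartImageFromFile(images.data(),
//                                 const_cast<const EXRHeader **>(headers),
//                                 static_cast<unsigned int>(num_parts), filename, &err);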
int SaveEXR(const float *data, int width, int height, int components,
const int save_as_fp16, const char *outfilename, const char **err) {
if ((components == 1) || (components == 3) || (components == 4)) {
// OK
} else {
std::stringstream ss;
ss << "Unsupported component value : " << components << std::endl;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRHeader header;
InitEXRHeader(&header);
if ((width < 16) && (height < 16)) {
// No compression for small image.
header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
} else {
header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
}
EXRImage image;
InitEXRImage(&image);
image.num_channels = components;
std::vector<float> images[4];
if (components == 1) {
images[0].resize(static_cast<size_t>(width * height));
memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
} else {
images[0].resize(static_cast<size_t>(width * height));
images[1].resize(static_cast<size_t>(width * height));
images[2].resize(static_cast<size_t>(width * height));
images[3].resize(static_cast<size_t>(width * height));
// Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
images[0][i] = data[static_cast<size_t>(components) * i + 0];
images[1][i] = data[static_cast<size_t>(components) * i + 1];
images[2][i] = data[static_cast<size_t>(components) * i + 2];
if (components == 4) {
images[3][i] = data[static_cast<size_t>(components) * i + 3];
}
}
}
float *image_ptr[4] = {0, 0, 0, 0};
if (components == 4) {
image_ptr[0] = &(images[3].at(0)); // A
image_ptr[1] = &(images[2].at(0)); // B
image_ptr[2] = &(images[1].at(0)); // G
image_ptr[3] = &(images[0].at(0)); // R
} else if (components == 3) {
image_ptr[0] = &(images[2].at(0)); // B
image_ptr[1] = &(images[1].at(0)); // G
image_ptr[2] = &(images[0].at(0)); // R
} else if (components == 1) {
image_ptr[0] = &(images[0].at(0)); // A
}
image.images = reinterpret_cast<unsigned char **>(image_ptr);
image.width = width;
image.height = height;
header.num_channels = components;
header.channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
// Must be (A)BGR order, since most of EXR viewers expect this channel order.
if (components == 4) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
strncpy_s(header.channels[1].name, "B", 255);
strncpy_s(header.channels[2].name, "G", 255);
strncpy_s(header.channels[3].name, "R", 255);
#else
strncpy(header.channels[0].name, "A", 255);
strncpy(header.channels[1].name, "B", 255);
strncpy(header.channels[2].name, "G", 255);
strncpy(header.channels[3].name, "R", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
header.channels[1].name[strlen("B")] = '\0';
header.channels[2].name[strlen("G")] = '\0';
header.channels[3].name[strlen("R")] = '\0';
} else if (components == 3) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "B", 255);
strncpy_s(header.channels[1].name, "G", 255);
strncpy_s(header.channels[2].name, "R", 255);
#else
strncpy(header.channels[0].name, "B", 255);
strncpy(header.channels[1].name, "G", 255);
strncpy(header.channels[2].name, "R", 255);
#endif
header.channels[0].name[strlen("B")] = '\0';
header.channels[1].name[strlen("G")] = '\0';
header.channels[2].name[strlen("R")] = '\0';
} else {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
#else
strncpy(header.channels[0].name, "A", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
}
header.pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
header.requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
for (int i = 0; i < header.num_channels; i++) {
header.pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
if (save_as_fp16 > 0) {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
} else {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
// no precision reduction)
}
}
int ret = SaveEXRImageToFile(&image, &header, outfilename, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
free(header.channels);
free(header.pixel_types);
free(header.requested_pixel_types);
return ret;
}
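// Minimal usage sketch for SaveEXR() (illustrative only; `rgba`, `width` and
// `height` are assumed to come from the caller):
//
//   const char *err = NULL;
//   int ret = SaveEXR(rgba, width, height, 4 /* RGBA */, 1 /* save as fp16 */, "out.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     fprintf(stderr, "SaveEXR failed: %s\n", err ? err : "(unknown)");
//     FreeEXRErrorMessage(err);
//   }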
#ifdef __clang__
// zero-as-null-pointer-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEFINED
#endif // TINYEXR_IMPLEMENTATION
|
deconvolution_pack1to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void deconvolution_pack1to4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack1ton, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
const int maxk = kernel_w * kernel_h;
const float* bias_data_ptr = bias_data;
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
v4f32 _sum = (v4f32)__msa_fill_w(0);
if (bias_data_ptr)
{
_sum = (v4f32)__msa_ld_w((const float*)bias_data_ptr + p * 4, 0);
}
const float* kptr = (const float*)weight_data_pack1ton + maxk * channels * p * 4;
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
for (int y = 0; y < kernel_h; y++)
{
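// Transposed-convolution gather: output pixel (i, j) only receives contributions from
// input samples where (i + y*dilation_h - (kernel_extent_h - 1)) and the analogous
// horizontal term are non-negative multiples of the stride.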
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
const float* sptr = m.row(sy);
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
float val = sptr[sx];
int k = y * kernel_w + x;
v4f32 _val = (v4f32)__msa_fill_w_f32(val);
v4f32 _w = (v4f32)__msa_ld_w(kptr + k * 4, 0);
_sum = __msa_fmadd_w(_sum, _val, _w);
}
}
kptr += maxk * 4;
}
_sum = activation_ps(_sum, activation_type, activation_params);
__msa_st_w((v4i32)_sum, outptr + j * 4, 0);
}
outptr += outw * 4;
}
}
}
|
weightCl.c | // % function [wcl,pc]= weightCl(E,no_allcl)
// %==========================================================================
// % FUNCTION: wcl = weightCl(E)
// % DESCRIPTION: This function computes weight for each pair of clusters using
// % their shared members (Jaccard Coefficient)
// %
// % INPUTS: E = N-by-M matrix of cluster ensemble
// %
// % OUTPUT: wcl = a weighted cluster matrix
// %==========================================================================
// % copyright (c) 2010 Iam-on & Garrett
// % optimization for speed: Nejc Ilc, 2014
// %==========================================================================
// compile: mex -largeArrayDims OPTIMFLAGS="/openmp $OPTIMFLAGS" weightCl.c
#include "mex.h"
#include <math.h>
#include <omp.h>
#include "limits.h"
long long myRound(double x) { return (long long)floor(x+0.5); }
/* The gateway function */
void mexFunction( int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[])
{
long long nCls; /* input: number of all clusters */
double *E; /* MxN input matrix E */
long long N,M; /* size of matrix E */
double * WCL; /* output matrix WCL */
double * PC; /* output matrix PC */
//mwSize PC_dims[2]; /* dimensions of PC*/
long long Ei, WCLi, PCi;
long long Erow,label, WCLrow, WCLcol;
long long numPair;
long long numIntersect, numUnion;
int PCi_sum;
double WCLval;
/* check for proper number of arguments */
if(nrhs != 2) {
mexErrMsgIdAndTxt("LCE:weightCl:nrhs","Two inputs required.");
}
if(nlhs < 1) {
mexErrMsgIdAndTxt("LCE:weightCl:nlhs","At least one output required.");
}
if(nlhs > 2) {
mexErrMsgIdAndTxt("LCE:weightCl:nlhs","Max two output required.");
}
/* make sure the second input argument is scalar */
if( !mxIsDouble(prhs[1]) ||
mxIsComplex(prhs[1]) ||
mxGetNumberOfElements(prhs[1])!=1 ) {
mexErrMsgIdAndTxt("LCE:weightCl:notScalar","Second input argument must be a scalar.");
}
/* get number of rows and columns of first input argument */
N = mxGetM(prhs[0]); // number of data points
M = mxGetN(prhs[0]); // number of ensemble members
/* get the value of the scalar input */
nCls = (long long)mxGetScalar(prhs[1]);
/* create a pointer to the real data in the input matrix */
E = mxGetPr(prhs[0]);
/* create the output matrix WCL */
plhs[0] = mxCreateDoubleMatrix(nCls,nCls,mxREAL);
/* get a pointer to the real data in the output matrix */
WCL = mxGetPr(plhs[0]);
/* create the output matrix WCL */
//PC_dims[0] = N;
//PC_dims[1] = nCls;
//plhs[1] = mxCreateNumericArray(2,PC_dims,mxINT8_CLASS,mxREAL);
plhs[1] = mxCreateDoubleMatrix(N,nCls,mxREAL);
/* get a pointer to the real data in the output matrix */
PC = mxGetPr(plhs[1]);
// % pc = zeros(N,no_allcl); % matrix indicates if data point belongs to the cluster (1=y, 0=n), row=data, col = cluster
// % for i=1:N
// % pc(i,E(i,:))=1; % pc(i,j) = 1 if data i belongs to cluster j
// % end
// for (i=0; i<N; i++){
// for (j=0; j<M; j++){
// label = E[i+j*N];
// mexPrintf("i:%d, j: %d, E: %d, PCind: %d\n", i,j,label, i+(label-1)*N);
// PC[i+(label-1)*N] = 1;
// }
// }
#pragma omp parallel for shared(E, N, M) private(Ei,Erow,label)
for (Ei=0; Ei<N*M; Ei++){
Erow = Ei % N;
label = (long long) E[Ei];
//mexPrintf("Ei:%d, Erow: %d, label: %d, PCind: %d\n", Ei,Erow,label, Erow+(label-1)*N);
PC[Erow+(label-1)*N] = 1;
}
// % %find number of shared data points for each pair of clusters ==> intersect/union
// % wcl = zeros(no_allcl,no_allcl);
// % for i=1:no_allcl-1
// % for ii=i+1:no_allcl
// % pcSum = pc(:,i)+pc(:,ii);
// % tmp = sum(pcSum>0);
// % if tmp > 0
// % wcl(i,ii) = sum(pcSum==2) / tmp; %intersection/union
// % end
// % end
// % end
// % wcl = wcl + wcl';
//mexPrintf("int: %d, long: %d, double: %d, mwSize: %d\n", sizeof(int),sizeof(long),sizeof(double),sizeof(mwSize));
//mexPrintf("INT_MAX: %ul\n",UINT_MAX);
//mexPrintf("myRound(0.5)=%lf\n",myRound(0.49999));
numPair = nCls*(nCls-1)/2;
#pragma omp parallel for
for (WCLi=0; WCLi < numPair; WCLi++){
//mexPrintf("Num threads %d, thread ID %d.\n", omp_get_num_threads(), omp_get_thread_num());
// Compute indices of the upper triangular matrix
WCLcol = myRound(floor(-0.5 + 0.5 * sqrt(1 + 8.0 * WCLi)) + 2);
WCLrow = myRound(WCLcol * (3.0 - WCLcol) / 2.0 + WCLi)-1;
WCLcol -= 1;
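// e.g. WCLi=0 -> (row,col)=(0,1), WCLi=1 -> (0,2), WCLi=2 -> (1,2), WCLi=3 -> (0,3), ...
// i.e. the strict upper triangle enumerated column by column.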
numIntersect = 0;
numUnion = 0;
for (PCi=0; PCi<N; PCi++){
PCi_sum = (int)(PC[WCLrow*N + PCi] + PC[WCLcol*N + PCi]);
if (PCi_sum > 0){
numUnion++;
}
if (PCi_sum == 2){
numIntersect++;
}
}
if(numUnion > 0){
WCLval = numIntersect / (double)numUnion;
WCL[WCLcol*nCls + WCLrow] = WCLval;
WCL[WCLrow*nCls + WCLcol] = WCLval;
}
}
}
|
lapw_mex.c | #include <inttypes.h>
#include <math.h>
#include <omp.h>
#include "mex.h"
#define PI 3.14159265358979323846
#define TAU (2*PI)
#define WRAPF(u) (((u) < -(float)PI) || ((u) > (float)PI))
#define WRAPD(u) (((u) < -(double)PI) || ((u) > (double)PI))
void lapwf(float *du, const float *u, const double *h, const size_t *sz);
void lapwd(double *du, const double *u, const double *h, const size_t *sz);
static float wraptopif(float x);
static double wraptopid(double x);
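/*
 * lapwf/lapwd compute a 7-point finite-difference Laplacian of a 3-D phase
 * volume: along each axis the forward and backward first differences (uf, ub)
 * are re-wrapped into [-PI, PI) before being differenced, so that 2*PI phase
 * jumps do not contaminate the second differences.
 */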
void
mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
if ((nrhs != 3) || (nlhs > 1)) {
mexErrMsgTxt("Usage: lapw_mex(d2u, u, h);");
return;
}
const double *h = (const double *)mxGetData(prhs[2]);
const size_t *sz = (const size_t *)mxGetDimensions(prhs[0]);
if (mxIsSingle(prhs[0])) {
float *du = (float *)mxGetData(prhs[0]);
const float *u = (const float *)mxGetData(prhs[1]);
lapwf(du, u, h, sz);
} else {
double *du = (double *)mxGetData(prhs[0]);
const double *u = (const double *)mxGetData(prhs[1]);
lapwd(du, u, h, sz);
}
if (nlhs == 1) {
plhs[0] = mxCreateDoubleScalar(1.0);
}
return;
}
void
lapwf(float *du, const float *u, const double *h, const size_t *sz)
{
size_t i, j, k;
size_t l;
float ub, uf;
const size_t nx = sz[0];
const size_t ny = sz[1];
const size_t nz = sz[2];
const size_t nxny = nx*ny;
const size_t NX = nx-1;
const size_t NY = nx*(ny-1);
const size_t NZ = nxny*(nz-1);
const float hx = (float)(1.0/(h[0]*h[0]));
const float hy = (float)(1.0/(h[1]*h[1]));
const float hz = (float)(1.0/(h[2]*h[2]));
#pragma omp parallel for private(i,j,k,l,ub,uf) schedule(static) \
if(nxny*nz > 16*16*16)
for(k = nxny; k < NZ; k += nxny) {
for(j = nx; j < NY; j += nx) {
l = 1 + j + k;
for(i = 1; i < NX; ++i, ++l) {
ub = u[l] - u[l-1];
uf = u[l+1] - u[l];
ub = WRAPF(ub) ? wraptopif(ub) : ub;
uf = WRAPF(uf) ? wraptopif(uf) : uf;
du[l] = hx*(uf - ub);
ub = u[l] - u[l-nx];
uf = u[l+nx] - u[l];
ub = WRAPF(ub) ? wraptopif(ub) : ub;
uf = WRAPF(uf) ? wraptopif(uf) : uf;
du[l] = du[l] + hy*(uf - ub);
ub = u[l] - u[l-nxny];
uf = u[l+nxny] - u[l];
ub = WRAPF(ub) ? wraptopif(ub) : ub;
uf = WRAPF(uf) ? wraptopif(uf) : uf;
du[l] = du[l] + hz*(uf - ub);
}
}
}
return;
}
void
lapwd(double *du, const double *u, const double *h, const size_t *sz)
{
size_t i, j, k;
size_t l;
double ub, uf;
const size_t nx = sz[0];
const size_t ny = sz[1];
const size_t nz = sz[2];
const size_t nxny = nx*ny;
const size_t NX = nx-1;
const size_t NY = nx*(ny-1);
const size_t NZ = nxny*(nz-1);
const double hx = 1.0/(h[0]*h[0]);
const double hy = 1.0/(h[1]*h[1]);
const double hz = 1.0/(h[2]*h[2]);
#pragma omp parallel for private(i,j,k,l,ub,uf) schedule(static) \
if(nxny*nz > 16*16*16)
for(k = nxny; k < NZ; k += nxny) {
for(j = nx; j < NY; j += nx) {
l = 1 + j + k;
for(i = 1; i < NX; ++i, ++l) {
ub = u[l] - u[l-1];
uf = u[l+1] - u[l];
ub = WRAPD(ub) ? wraptopid(ub) : ub;
uf = WRAPD(uf) ? wraptopid(uf) : uf;
du[l] = hx*(uf - ub);
ub = u[l] - u[l-nx];
uf = u[l+nx] - u[l];
ub = WRAPD(ub) ? wraptopid(ub) : ub;
uf = WRAPD(uf) ? wraptopid(uf) : uf;
du[l] = du[l] + hy*(uf - ub);
ub = u[l] - u[l-nxny];
uf = u[l+nxny] - u[l];
ub = WRAPD(ub) ? wraptopid(ub) : ub;
uf = WRAPD(uf) ? wraptopid(uf) : uf;
du[l] = du[l] + hz*(uf - ub);
}
}
}
return;
}
static inline float
wraptopif(float x)
{
x += (float)PI;
return x < 0.0f
? fmodf(x, (float)TAU) + (float)PI
: fmodf(x, (float)TAU) - (float)PI;
}
static inline double
wraptopid(double x)
{
x += (double)PI;
return x < 0.0
? fmod(x, (double)TAU) + (double)PI
: fmod(x, (double)TAU) - (double)PI;
}
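/* Both helpers map an arbitrary angle into [-PI, PI);
   e.g. wraptopid(3.0*PI) is (approximately) -PI. */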
|
GxB_IndexUnaryOp_ytype_name.c | //------------------------------------------------------------------------------
// GxB_IndexUnaryOp_ytype_name: return the type_name of y for z=f(x,i,j,y)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_IndexUnaryOp_ytype_name // return name of type of scalar y
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_IndexUnaryOp op
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GxB_IndexUnaryOp_ytype_name (type_name, op)") ;
GB_RETURN_IF_NULL (type_name) ;
GB_RETURN_IF_NULL_OR_FAULTY (op) ;
ASSERT_INDEXUNARYOP_OK (op, "op for ytype_name", GB0) ;
//--------------------------------------------------------------------------
// get the type_name
//--------------------------------------------------------------------------
memcpy (type_name, op->ytype->name, GxB_MAX_NAME_LEN) ;
#pragma omp flush
return (GrB_SUCCESS) ;
}
|
rg_filter.c | //////////////////////////////////////
// Cunren Liang, NASA JPL/Caltech
// Copyright 2015-2018...
//////////////////////////////////////
#include "resamp.h"
#include <fftw3.h>
#include <omp.h>
#define SWAP4(a) (*(unsigned int *)&(a) = (((*(unsigned int *)&(a) & 0x000000ff) << 24) | ((*(unsigned int *)&(a) & 0x0000ff00) << 8) | ((*(unsigned int *)&(a) >> 8) & 0x0000ff00) | ((*(unsigned int *)&(a) >> 24) & 0x000000ff)))
int rg_filter(char *inputfile, int nrg, int naz, int nout, char **outputfile, float *bw, float *bc, int nfilter, int nfft, float beta, int zero_cf, float offset, int byteorder, long imageoffset, long lineoffset){
/*
inputfile: input file
nrg: file width
naz: file length
nout: number of output files
outputfile: (value_of_out_1, value_of_out_2, value_of_out_3...) output files
bw: (value_of_out_1, value_of_out_2, value_of_out_3...) filter bandwidth divided by sampling frequency [0, 1]
bc: (value_of_out_1, value_of_out_2, value_of_out_3...) filter center frequency divided by sampling frequency
nfilter: number of samples of the filter (odd). Reference Value: 65
nfft: number of samples of the FFT. Reference Value: 1024
beta: kaiser window beta. Reference Value: 1.0
zero_cf: if bc != 0.0, move center frequency to zero? 0: Yes (Reference Value). 1: No.
offset: offset (in samples) of linear phase for moving center frequency. Reference Value: 0.0
byteorder: (0) LSB, little endian; (1) MSB, big endian of input file
imageoffset: offset from start of the image of input file
lineoffset: length of each line of input file
*/
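//Example invocation (sketch; file names, sizes and band settings are illustrative only):
//  char *outs[1] = {"band0.slc"};
//  float bw[1] = {0.8f};
//  float bc[1] = {0.0f};
//  rg_filter("input.slc", 2000, 3000, 1, outs, bw, bc, 65, 1024, 1.0, 0, 0.0, 0, 0, 0);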
///////////////////////////////
// int k;
// printf("input parameters:");
// printf("%s\n", inputfile);
// printf("%d\n", nrg);
// printf("%d\n", nout);
// for(k =0; k<nout;k++){
// printf("%s\n", outputfile[k]);
// printf("%f\n", bw[k]);
// printf("%f\n", bc[k]);
// }
// printf("%d\n", nfilter);
// printf("%d\n", nfft);
// printf("%f\n", beta);
// printf("%d\n", zero_cf);
// printf("%f\n", offset);
///////////////////////////////
FILE *infp; //input image to be filtered
FILE **outfp; //output (filtered) images
fcomplex **filter;
fcomplex *in;
fcomplex **out;
fcomplex *tmp;
fcomplex *tmp2;
fcomplex *tmpf;
int *zeroflag;
fftwf_plan p_forward;
fftwf_plan p_backward;
fftwf_plan p_forward_filter;
//fftwf_plan p_backward_filter;
//int nout; //number of output files
//int nrg; //file width
//int naz; //file length
//int nfft; //fft length
//int nfilter; //filter length
int hnfilter;
//float *bw;
//float *bc;
//float beta; //kaiser window beta
//int zero_cf;
//float offset;
int argc_mand;
int nthreads;
float sc; //constant to scale the data read in to avoid large values
//during fft and ifft
float cf_pha;
float t;
fcomplex cf;
int nblock_in;
int nblock_out;
int num_block;
int i_block;
int nblock_in_last;
int nblock_out_last;
int i, j, i_out;
/*****************************************************************************/
//nfilter = 65;
//nfft = 1024;
//beta = 1.0;
//zero_cf = 0;
//offset = 0.0;
sc = 10000.0;
/*****************************************************************************/
infp = openfile(inputfile, "rb");
//naz = file_length(infp, nrg, sizeof(fcomplex));
//fseeko(infp,0L,SEEK_END);
//naz = (ftello(infp) - imageoffset) / (lineoffset + nrg*sizeof(fcomplex));
//rewind(infp);
printf("file width: %d, file length: %d\n\n", nrg, naz);
if(nout < 1){
fprintf(stderr, "there should be at least one output file!\n");
exit(1);
}
outfp = array1d_FILE(nout);
for(i = 0; i < nout; i++){
outfp[i] = openfile(outputfile[i], "wb");
}
//check filter length
if(nfilter < 3){
fprintf(stderr, "filter length: %d too small!\n", nfilter);
exit(1);
}
if(nfilter % 2 != 1){
fprintf(stderr, "filter length must be odd!\n");
exit(1);
}
if(byteorder == 0){
printf("inputfile byte order: little endian\n");
}
else{
printf("inputfile byte order: big endian\n");
}
printf("input file image offset [byte]: %ld\n", imageoffset);
printf("input file line offset [byte]: %ld\n", lineoffset);
if(imageoffset < 0){
fprintf(stderr, "image offset must be >= 0\n");
exit(1);
}
if(lineoffset < 0){
fprintf(stderr, "lineoffset offset must be >= 0\n");
exit(1);
}
//compute block processing parameters
hnfilter = (nfilter - 1) / 2;
nblock_in = nfft - nfilter + 1;
nblock_in += hnfilter;
if (nblock_in <= 0){
fprintf(stderr, "fft length too small compared with filter length!\n");
exit(1);
}
nblock_out = nblock_in - 2 * hnfilter;
num_block = (nrg - 2 * hnfilter) / nblock_out;
if((nrg - num_block * nblock_out - 2 * hnfilter) != 0){
num_block += 1;
}
if((nrg - 2 * hnfilter) <= 0){
num_block = 1;
}
if(num_block == 1){
nblock_out_last = 0;
nblock_in_last = nrg;
}
else{
nblock_out_last = nrg - (num_block - 1) * nblock_out - 2 * hnfilter;
nblock_in_last = nblock_out_last + 2 * hnfilter;
}
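//a worked example with the reference values nfft=1024, nfilter=65:
//  hnfilter   = (65-1)/2     = 32
//  nblock_in  = 1024-65+1+32 = 992  (input samples consumed per block)
//  nblock_out = 992-2*32     = 928  (valid output samples kept per block)
//i.e. each block overlaps its neighbors by 2*hnfilter samples.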
//allocate memory
filter = array2d_fcomplex(nout, nfft);
in = array1d_fcomplex(nrg);
out = array2d_fcomplex(nout, nrg);
tmp = array1d_fcomplex(nfft);
tmp2 = array1d_fcomplex(nfft);
tmpf = array1d_fcomplex(nfft);
zeroflag = array1d_int(nrg);
//as noted in the FFTW documentation,
//"Typically, the problem will have to involve at least a few thousand data points before threads become beneficial."
//so multi-threaded FFTW is not used here, since our FFT sizes are mostly small.
if(0){
//////////////////////////////////////////////////////////////////////////////////////////////////
//Multi-threaded FFTW
nthreads = fftwf_init_threads();
if(nthreads == 0){
fprintf(stderr, "WARNING: there is some error in using multi-threaded FFTW.\n");
fprintf(stderr, " therefore it is not used, and computation performance is reduced.\n");
nthreads = 1;
}
else{
//int this_thread = omp_get_thread_num(), num_threads = omp_get_num_threads();
//nthreads = omp_get_num_threads();
nthreads = omp_get_max_threads();
}
printf("FFTW is using %d threads\n", nthreads);
//this works for all the following plans
if(nthreads != 1)
//actually it is OK to pass nthreads=1, in this case, threads are disabled.
fftwf_plan_with_nthreads(nthreads);
//////////////////////////////////////////////////////////////////////////////////////////////////
}
//create plans before initializing data, because FFTW_MEASURE overwrites the in/out arrays.
p_forward = fftwf_plan_dft_1d(nfft, (fftwf_complex*)tmp, (fftwf_complex*)tmp, FFTW_FORWARD, FFTW_MEASURE);
p_backward = fftwf_plan_dft_1d(nfft, (fftwf_complex*)tmp2, (fftwf_complex*)tmp2, FFTW_BACKWARD, FFTW_MEASURE);
p_forward_filter = fftwf_plan_dft_1d(nfft, (fftwf_complex*)tmpf, (fftwf_complex*)tmpf, FFTW_FORWARD, FFTW_ESTIMATE);
//computing filters
for(i = 0; i < nout; i++){
bandpass_filter(bw[i], bc[i], nfilter, nfft, (nfilter-1)/2, beta, tmpf);
//relationship of nr and matlab fft
//nr fft matlab fft
// 1 <==> ifft()*nfft
// -1 <==> fft()
//four1((float *)filter - 1, nfft, -1);
fftwf_execute(p_forward_filter);
for(j = 0; j < nfft; j++){
filter[i][j].re = tmpf[j].re;
filter[i][j].im = tmpf[j].im;
}
}
fftwf_destroy_plan(p_forward_filter);
//skip image header
if(imageoffset != 0)
fseek(infp, imageoffset, SEEK_SET);
//process data
for(i = 0; i < naz; i++){
//progress report
if((i + 1) % 1000 == 0 || (i + 1) == naz)
fprintf(stderr,"processing line: %6d of %6d\r", i+1, naz);
if((i + 1) == naz)
fprintf(stderr,"\n\n");
//read data
if(i != 0)
fseek(infp, lineoffset - (long)((size_t)nrg * sizeof(fcomplex)), SEEK_CUR); //skip any per-line header; the cast keeps the offset signed
readdata((fcomplex *)in, (size_t)nrg * sizeof(fcomplex), infp);
//swap bytes
if(byteorder!=0){
for(j = 0; j < nrg; j++){
SWAP4(in[j].re);
SWAP4(in[j].im);
}
}
#pragma omp parallel for private(j) shared(nrg,in, zeroflag, sc)
for(j = 0; j < nrg; j++){
if(in[j].re != 0.0 || in[j].im != 0.0){
zeroflag[j] = 1;
in[j].re *= 1.0 / sc;
in[j].im *= 1.0 / sc;
}
else{
zeroflag[j] = 0;
}
}
//process each block
for(i_block = 0; i_block < num_block; i_block++){
//zero out
//for(j = 0; j < nfft; j++){
// tmp[j].re = 0.0;
// tmp[j].im = 0.0;
//}
memset((void *)tmp, 0, (size_t)nfft*sizeof(fcomplex));
//get data
if(num_block == 1){
for(j = 0; j < nrg; j++){
tmp[j] = in[j];
}
}
else{
if(i_block == num_block - 1){
for(j = 0; j < nblock_in_last; j++){
tmp[j] = in[j+nblock_out*i_block];
}
}
else{
for(j = 0; j < nblock_in; j++){
tmp[j] = in[j+nblock_out*i_block];
}
}
}
//four1((float *)tmp - 1, nfft, -1);
//tested, the same as above
fftwf_execute(p_forward);
//process each output file
for(i_out = 0; i_out < nout; i_out++){
//looks like this makes it slower, so comment out
//#pragma omp parallel for private(j) shared(nfft, tmp2, filter, i_out, tmp)
for(j = 0; j < nfft; j++)
tmp2[j] = cmul(filter[i_out][j], tmp[j]);
//four1((float *)tmp2 - 1, nfft, 1);
//tested, the same as above
fftwf_execute(p_backward);
//get data
if(num_block == 1){
for(j = 0; j < nrg; j++){
out[i_out][j] = tmp2[j];
}
}
else{
if(i_block == 0){
for(j = 0; j < hnfilter + nblock_out; j++){
out[i_out][j] = tmp2[j];
}
}
else if(i_block == num_block - 1){
for(j = 0; j < hnfilter + nblock_out_last; j++){
out[i_out][nrg - 1 - j] = tmp2[nblock_in_last - 1 - j];
}
}
else{
for(j = 0; j < nblock_out; j++){
out[i_out][j + hnfilter + i_block * nblock_out] = tmp2[j + hnfilter];
}
}
}//end of getting data
}//end of processing each output file
}//end of processing each block
//move center frequency
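//each sample is multiplied by exp(-j*2*pi*bc*t) with t = j + offset,
//demodulating the band centered at bc (in cycles/sample) down to zero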
if(zero_cf == 0){
//process each output file
//looks like this makes it slower, so comment out
//#pragma omp parallel for private(i_out, j, t, cf_pha, cf) shared(nout, bc, nrg, offset, out)
for(i_out = 0; i_out < nout; i_out++){
if(bc[i_out] != 0){
#pragma omp parallel for private(j, t, cf_pha, cf) shared(nrg, offset, bc, i_out, out)
for(j = 0; j < nrg; j++){
//t = j - (nrg - 1.0) / 2.0; //make 0 index exactly at range center
t = j + offset; //make 0 index exactly at range center
cf_pha = 2.0 * PI * (-bc[i_out]) * t;
cf.re = cos(cf_pha);
cf.im = sin(cf_pha);
out[i_out][j] = cmul(out[i_out][j], cf);
}
}
}
}
//scale back and write data
//process each output file
for(i_out = 0; i_out < nout; i_out++){
//scale back
#pragma omp parallel for private(j) shared(nrg, zeroflag, out, i_out, sc, nfft)
for(j = 0; j < nrg; j++){
if(zeroflag[j] == 0){
out[i_out][j].re = 0.0;
out[i_out][j].im = 0.0;
}
else{
out[i_out][j].re *= sc / nfft;
out[i_out][j].im *= sc / nfft;
}
}
//write data
writedata((fcomplex *)out[i_out], nrg * sizeof(fcomplex), outfp[i_out]);
}
}//end of processing data
fftwf_destroy_plan(p_forward);
fftwf_destroy_plan(p_backward);
free_array2d_fcomplex(filter);
free_array1d_fcomplex(in);
free_array2d_fcomplex(out);
free_array1d_fcomplex(tmp);
free_array1d_fcomplex(tmp2);
free_array1d_fcomplex(tmpf);
free_array1d_int(zeroflag);
//free_array1d_float(bw);
//free_array1d_float(bc);
fclose(infp);
for(i_out = 0; i_out < nout; i_out++)
fclose(outfp[i_out]);
//free_array1d_FILE(outfp);
return 0;
}//end rg_filter()
|
SpMat.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.             **
* ******************************************************************************/
/* Narayanan Sundaram (Intel Corp.), Michael Anderson (Intel Corp.)
* ******************************************************************************/
#ifndef SRC_SPMAT_H_
#define SRC_SPMAT_H_
#include <string>
#include <algorithm>
#include "src/TileOps.h"
#include "binary_search.h"
template <typename T>
bool compare_tile_id(const tedge_t<T>& a, const tedge_t<T>& b) {
if (a.tile_id < b.tile_id)
return true;
return false;
}
template <typename SpTile>
class SpMat {
public:
SpTile** tiles;
int* start_idx;
int* start_idy;
int* nodeIds;
int ntiles_x;
int ntiles_y;
int m;
int n;
bool empty;
std::string name;
int num_tiles_x;
int num_tiles_y;
int (*pfn)(int, int, int, int, int);
SpMat() { empty = true; }
void set(int _m, int _n, int _ntiles_x, int _ntiles_y, int* _nodeIds,
int* _start_idx, int* _start_idy) {
/*
if(global_myrank == 0)
{
for(int i = 0 ; i < _ntiles_y + 1 ; i++)
{
std::cout << "_start_idy[" << i << "]: " << _start_idy[i] << std::endl;
}
for(int i = 0 ; i < _ntiles_x + 1 ; i++)
{
std::cout << "_start_idx[" << i << "]: " << _start_idx[i] << std::endl;
}
}
*/
empty = false;
// Copy metadata
start_idx =
reinterpret_cast<int*>(_mm_malloc((_ntiles_x + 1) * sizeof(int), 64));
start_idy =
reinterpret_cast<int*>(_mm_malloc((_ntiles_y + 1) * sizeof(int), 64));
nodeIds = reinterpret_cast<int*>(
_mm_malloc((_ntiles_x * _ntiles_y) * sizeof(int), 64));
memcpy(start_idx, _start_idx, (_ntiles_x + 1) * sizeof(int));
memcpy(start_idy, _start_idy, (_ntiles_y + 1) * sizeof(int));
memcpy(nodeIds, _nodeIds, (_ntiles_x * _ntiles_y) * sizeof(int));
m = _m;
n = _n;
ntiles_x = _ntiles_x;
ntiles_y = _ntiles_y;
assert(ntiles_x > 0);
assert(ntiles_y > 0);
MPI_Barrier(MPI_COMM_WORLD);
// Allocate space for tiles
tiles = new SpTile* [ntiles_y];
for (int i = 0; i < ntiles_y; i++) {
tiles[i] = new SpTile[ntiles_x];
}
// Set metadata
for (int tile_j = 0; tile_j < ntiles_x; tile_j++) {
for (int tile_i = 0; tile_i < ntiles_y; tile_i++) {
int tile_m = start_idy[tile_i + 1] - start_idy[tile_i];
int tile_n = start_idx[tile_j + 1] - start_idx[tile_j];
tiles[tile_i][tile_j] = SpTile(tile_m, tile_n);
std::stringstream ss;
ss << "Created_" << tile_i << "," << tile_j;
tiles[tile_i][tile_j].name = ss.str();
}
}
}
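// getPartition() maps a 1-based (src, dst) pair to its tile by a linear
// search over the row/column boundaries start_idy/start_idx. A hedged
// example: with start_idy = {0, 4, 8}, src = 5 satisfies (5 > 4) && (5 <= 8),
// so *ival = 1, i.e. the second tile row.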
inline int getPartition(const int src, const int dst, int* ival, int* jval) const {
(*ival) = -1;
(*jval) = -1;
for (int i = 0; i < ntiles_y; i++) {
if ((src > start_idy[i]) && (src <= start_idy[i + 1])) {
(*ival) = i;
break;
}
}
for (int j = 0; j < ntiles_x; j++) {
if ((dst > start_idx[j]) && (dst <= start_idx[j + 1])) {
(*jval) = j;
break;
}
}
if ((*ival) == -1 || (*jval) == -1) {
printf("%d %d == -1\n", src, dst);
return -1;
}
return (*ival) + (*jval) * ntiles_y;
}
template <typename T>
void ingestEdgelist(edgelist_t<T> blob) {
int nnz_l = blob.nnz;
edge_t<T>* edge_list = blob.edges;
int m = blob.m;
int n = blob.n;
printf("Rank %d: Before shuffle %d edges\n", global_myrank, blob.nnz);
edge_t<T> * received_edges;
unsigned long int new_nnz = 0;
if(global_nrank == 1)
{
new_nnz = nnz_l;
received_edges = new edge_t<T>[new_nnz];
memcpy(received_edges, edge_list, new_nnz * sizeof(edge_t<T>));
}
else
{
tedge_t<T> * tedges = new tedge_t<T>[nnz_l];
#pragma omp parallel for
for(unsigned long i = 0 ; i < nnz_l ; i++)
{
tedges[i].src = edge_list[i].src;
tedges[i].dst = edge_list[i].dst;
tedges[i].val = edge_list[i].val;
int ival, jval;
int tile_id = getPartition(edge_list[i].src, edge_list[i].dst, &ival, &jval);
assert(tile_id != -1);
tedges[i].tile_id = nodeIds[ival + jval * ntiles_y];
}
__gnu_parallel::sort(tedges, tedges + nnz_l, compare_tile_id<T>);
int * assignment = new int[nnz_l];
#pragma omp parallel for
for(unsigned long i = 0 ; i < nnz_l ; i++)
{
edge_list[i].src = tedges[i].src;
edge_list[i].dst = tedges[i].dst;
edge_list[i].val = tedges[i].val;
assignment[i] = tedges[i].tile_id;
}
delete [] tedges;
unsigned long int * positions = new unsigned long[global_nrank+1];
unsigned long int * counts = new unsigned long[global_nrank];
unsigned long int * recv_positions = new unsigned long[global_nrank+1];
unsigned long int * recv_counts = new unsigned long[global_nrank];
unsigned long int current_count = 0;
for(int i = 0 ; i < global_nrank ; i++)
{
int point = binary_search_right_border(assignment, i, 0, nnz_l, nnz_l);
if(point == -1)
{
counts[i] = 0;
positions[i] = current_count;
}
else
{
counts[i] = (point+1) - current_count;
positions[i] = current_count;
current_count = (point+1);
}
if(global_myrank == 0)
{
std::cout << "point: " << point << "\t" << counts[i] << std::endl;
}
}
positions[global_nrank] = nnz_l;
MPI_Barrier(MPI_COMM_WORLD);
delete [] assignment;
MPI_Request* mpi_req = new MPI_Request[2 * global_nrank];
MPI_Status* mpi_status = new MPI_Status[2 * global_nrank];
for (int i = 0; i < global_nrank; i++) {
MPI_Isend(&counts[i], 1, MPI_UNSIGNED_LONG, i, global_myrank, MPI_COMM_WORLD,
&mpi_req[i]);
}
for (int i = 0; i < global_nrank; i++) {
MPI_Irecv(&recv_counts[i], 1, MPI_UNSIGNED_LONG, i, i, MPI_COMM_WORLD,
&mpi_req[i + global_nrank]);
}
MPI_Waitall(2 * global_nrank, mpi_req, mpi_status);
MPI_Barrier(MPI_COMM_WORLD);
recv_positions[0] = 0;
for(int i = 0 ; i < global_nrank ; i++)
{
new_nnz += recv_counts[i];
recv_positions[i+1] = new_nnz;
}
printf("Rank %d: After shuffle %d edges\n", global_myrank, new_nnz);
MPI_Datatype MPI_EDGE_T;
MPI_Type_contiguous(sizeof(edge_t<T>), MPI_CHAR, &MPI_EDGE_T);
MPI_Type_commit(&MPI_EDGE_T);
for (int i = 0; i < global_nrank; i++) {
MPI_Isend(edge_list + positions[i], counts[i] ,
MPI_EDGE_T, i, global_myrank, MPI_COMM_WORLD, &mpi_req[i]);
}
received_edges = new edge_t<T>[new_nnz];
for (int i = 0; i < global_nrank; i++) {
MPI_Irecv(received_edges + recv_positions[i], recv_counts[i] ,
MPI_EDGE_T, i, i, MPI_COMM_WORLD, &mpi_req[i+global_nrank]);
}
MPI_Waitall(2 * global_nrank, mpi_req, mpi_status);
MPI_Barrier(MPI_COMM_WORLD);
}
printf("Rank %d: After shuffle %d edges\n", global_myrank, new_nnz);
tedge_t<T> * tedges2 = new tedge_t<T>[new_nnz];
#pragma omp parallel for
for(unsigned long i = 0 ; i < new_nnz ; i++)
{
tedges2[i].src = received_edges[i].src;
tedges2[i].dst = received_edges[i].dst;
tedges2[i].val = received_edges[i].val;
int ival, jval;
tedges2[i].tile_id = getPartition(received_edges[i].src, received_edges[i].dst, &ival, &jval);
assert(tedges2[i].tile_id != -1);
}
__gnu_parallel::sort(tedges2, tedges2 + new_nnz , compare_tile_id<T>);
int * assignment2 = new int[new_nnz];
#pragma omp parallel for
for(unsigned long i = 0 ; i < new_nnz ; i++)
{
received_edges[i].src = tedges2[i].src;
received_edges[i].dst = tedges2[i].dst;
received_edges[i].val = tedges2[i].val;
assignment2[i] = tedges2[i].tile_id;
}
delete [] tedges2;
for (int tile_j = 0; tile_j < ntiles_x; tile_j++) {
for (int tile_i = 0; tile_i < ntiles_y; tile_i++) {
if (nodeIds[tile_i + tile_j * ntiles_y] == global_myrank) {
int tile_m = start_idy[tile_i + 1] - start_idy[tile_i];
int tile_n = start_idx[tile_j + 1] - start_idx[tile_j];
int this_tile_id = tile_i + tile_j * ntiles_y;
// Find left and right
int start_nz = binary_search_left_border(assignment2, this_tile_id, 0, new_nnz, new_nnz);
int end_nz = binary_search_right_border(assignment2, this_tile_id, 0, new_nnz, new_nnz) + 1;
int nnz = 0;
if((start_nz != -1) && (end_nz != -1))
{
nnz = end_nz - start_nz;
}
if (nnz <= 0) {
tiles[tile_i][tile_j] = SpTile(tile_m, tile_n);
std::stringstream ss;
ss << "LoadedEmpty_" << tile_i << "," << tile_j;
tiles[tile_i][tile_j].name = ss.str();
} else {
tiles[tile_i][tile_j] =
SpTile(received_edges + start_nz, tile_m, tile_n, nnz, start_idy[tile_i],
start_idx[tile_j]);
std::stringstream ss;
ss << "Loaded_" << tile_i << "," << tile_j;
tiles[tile_i][tile_j].name = ss.str();
}
}
}
}
delete [] assignment2;
delete [] received_edges;
MPI_Barrier(MPI_COMM_WORLD);
}
void print_tiles(std::string msg, int output_rank) {
MPI_Barrier(MPI_COMM_WORLD);
{
if (global_myrank == output_rank) {
std::cout << "Rank " << global_myrank << "\t" << msg << std::endl;
for (int i = 0; i < ntiles_y; i++) {
for (int j = 0; j < ntiles_x; j++) {
std::cout << nodeIds[i + j * ntiles_y];
}
std::cout << std::endl;
}
}
}
MPI_Barrier(MPI_COMM_WORLD);
}
void Allocate2DPartitioned(int m, int n, int _num_tiles_x, int _num_tiles_y,
int (*_pfn)(int, int, int, int, int)) {
num_tiles_x = _num_tiles_x;
num_tiles_y = _num_tiles_y;
pfn = _pfn;
int vx, vy;
int roundup = 256;
int ntiles_x = num_tiles_x;
int ntiles_y = num_tiles_y;
vx = ((((n + ntiles_x - 1) / ntiles_x) + roundup - 1) / roundup) * roundup;
vy = ((((m + ntiles_y - 1) / ntiles_y) + roundup - 1) / roundup) * roundup;
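// a hedged worked example: n = 10000, ntiles_x = 4, roundup = 256 gives
// ceil(10000/4) = 2500, rounded up to a multiple of 256 -> vx = 2560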
int* nodeIds = reinterpret_cast<int*>(
_mm_malloc(num_tiles_x * num_tiles_y * sizeof(int), 64));
int* startx =
reinterpret_cast<int*>(_mm_malloc((num_tiles_x + 1) * sizeof(int), 64));
int* starty =
reinterpret_cast<int*>(_mm_malloc((num_tiles_y + 1) * sizeof(int), 64));
for (int i = 0; i < num_tiles_y; i++) {
for (int j = 0; j < num_tiles_x; j++) {
nodeIds[i + j * num_tiles_y] =
pfn(j, i, num_tiles_x, num_tiles_y, global_nrank);
}
}
for (int j = 0; j < num_tiles_x; j++) {
startx[j] = std::min(vx * j, n);
}
for (int i = 0; i < num_tiles_y; i++) {
starty[i] = std::min(vy * i, m);
}
startx[num_tiles_x] = n;
starty[num_tiles_y] = m;
set(m, n, num_tiles_x, num_tiles_y, nodeIds, startx, starty);
}
template<typename T>
void setElement(const int idx, const int idy, T val)
{
assert(!empty);
int ival, jval;
int tile = getPartition(idy, idx, &ival, &jval);
assert(tile != -1);
if(nodeIds[ival + jval * ntiles_y] == global_myrank)
{
tiles[ival][jval].set(idx - start_idx[jval], idy - start_idy[ival], val);
}
}
template<typename T>
T getElement(const int idx, const int idy) const {
assert(!empty);
int ival, jval;
int tile = getPartition(idy, idx, &ival, &jval); // Is this right?
assert(tile != -1);
if(nodeIds[ival + jval * ntiles_y] == global_myrank)
{
return tiles[ival][jval].get(idx - start_idx[jval], idy - start_idy[ival]);
}
// the element lives on another rank; return a default-constructed value so
// control cannot fall off the end of a non-void function (callers should
// only rely on results for locally-owned elements)
return T();
}
template <typename T>
void get_edges(edgelist_t<T>* edgelist) const {
// Get nnz
int nnzs = 0;
for (int i = 0; i < ntiles_y; i++) {
for (int j = 0; j < ntiles_x; j++) {
if (nodeIds[i + j * ntiles_y] == global_myrank) {
nnzs += tiles[i][j].nnz;
}
}
}
edgelist->m = m;
edgelist->n = n;
edgelist->nnz = nnzs;
if(nnzs > 0)
{
edgelist->edges = reinterpret_cast<edge_t<T>*>(
_mm_malloc((uint64_t)nnzs * (uint64_t)sizeof(edge_t<T>), 64));
nnzs = 0;
for (int i = 0; i < ntiles_y; i++) {
for (int j = 0; j < ntiles_x; j++) {
if (nodeIds[i + j * ntiles_y] == global_myrank) {
tiles[i][j]
.get_edges(edgelist->edges + nnzs, start_idy[i], start_idx[j]);
nnzs += tiles[i][j].nnz;
}
}
}
}
}
uint64_t getNNZ()
{
uint64_t total_nnz = 0;
for(int i = 0 ; i < ntiles_y ; i++)
{
for(int j = 0 ; j < ntiles_x ; j++)
{
if(nodeIds[i + j * ntiles_y] == global_myrank)
{
total_nnz += tiles[i][j].nnz;
}
}
}
// global reduction
MPI_Allreduce(MPI_IN_PLACE, &total_nnz, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); // total_nnz is uint64_t; MPI_INT would truncate it
return total_nnz;
}
};
#endif // SRC_SPMAT_H_
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resource_.h"
#include "magick/resize.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
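/*
  A minimal usage sketch (not part of the original documentation; assumes a
  valid `image` and an initialized `exception`):

    Image *oriented=AutoOrientImage(image,image->orientation,exception);
    if (oriented != (Image *) NULL)
      {
        image=DestroyImage(image);
        image=oriented;
      }
*/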
MagickExport Image *AutoOrientImage(const Image *image,
const OrientationType orientation,ExceptionInfo *exception)
{
Image
*orient_image;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
orient_image=(Image *) NULL;
switch(orientation)
{
case UndefinedOrientation:
case TopLeftOrientation:
default:
{
orient_image=CloneImage(image,0,0,MagickTrue,exception);
break;
}
case TopRightOrientation:
{
orient_image=FlopImage(image,exception);
break;
}
case BottomRightOrientation:
{
orient_image=RotateImage(image,180.0,exception);
break;
}
case BottomLeftOrientation:
{
orient_image=FlipImage(image,exception);
break;
}
case LeftTopOrientation:
{
orient_image=TransposeImage(image,exception);
break;
}
case RightTopOrientation:
{
orient_image=RotateImage(image,90.0,exception);
break;
}
case RightBottomOrientation:
{
orient_image=TransverseImage(image,exception);
break;
}
case LeftBottomOrientation:
{
orient_image=RotateImage(image,270.0,exception);
break;
}
}
if (orient_image != (Image *) NULL)
orient_image->orientation=TopLeftOrientation;
return(orient_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
% Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
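/*
  A hedged usage sketch (hypothetical values): remove a 100-column-wide
  vertical band starting at x=50; a zero height leaves the rows untouched.

    RectangleInfo chop_info;
    chop_info.x=50;  chop_info.width=100;
    chop_info.y=0;   chop_info.height=0;
    Image *chopped=ChopImage(image,&chop_info,exception);
*/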
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"
CacheView
*chop_view,
*image_view;
Image
*chop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
extent;
ssize_t
y;
/*
Check chop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(chop_info != (RectangleInfo *) NULL);
if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
((chop_info->y+(ssize_t) chop_info->height) < 0) ||
(chop_info->x > (ssize_t) image->columns) ||
(chop_info->y > (ssize_t) image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
extent=(*chop_info);
if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
extent.width=(size_t) ((ssize_t) image->columns-extent.x);
if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
extent.height=(size_t) ((ssize_t) image->rows-extent.y);
if (extent.x < 0)
{
extent.width-=(size_t) (-extent.x);
extent.x=0;
}
if (extent.y < 0)
{
extent.height-=(size_t) (-extent.y);
extent.y=0;
}
chop_image=CloneImage(image,image->columns-extent.width,image->rows-
extent.height,MagickTrue,exception);
if (chop_image == (Image *) NULL)
return((Image *) NULL);
/*
Extract chop image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,chop_image,extent.y,1)
#endif
for (y=0; y < (ssize_t) extent.y; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict chop_indexes,
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
*q=(*p);
if (indexes != (IndexPacket *) NULL)
{
if (chop_indexes != (IndexPacket *) NULL)
*chop_indexes++=GetPixelIndex(indexes+x);
}
q++;
}
p++;
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
/*
Extract chop image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict chop_indexes,
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
1,exception);
if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
*q=(*p);
if (indexes != (IndexPacket *) NULL)
{
if (chop_indexes != (IndexPacket *) NULL)
*chop_indexes++=GetPixelIndex(indexes+x);
}
q++;
}
p++;
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
chop_view=DestroyCacheView(chop_view);
image_view=DestroyCacheView(image_view);
chop_image->type=image->type;
if (status == MagickFalse)
chop_image=DestroyImage(chop_image);
return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
% single image.
%
% The format of the ConsolidateCMYKImages method is:
%
% Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
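/*
  A hedged usage sketch (assumes `planes` is an image list whose frames are
  ordered C,M,Y,K, e.g. four grayscale separations read in sequence):

    Image *cmyk=ConsolidateCMYKImages(planes,exception);
*/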
MagickExport Image *ConsolidateCMYKImages(const Image *images,
ExceptionInfo *exception)
{
CacheView
*cmyk_view,
*image_view;
Image
*cmyk_image,
*cmyk_images;
register ssize_t
i;
ssize_t
y;
/*
Consolidate separate C, M, Y, and K planes into a single image.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cmyk_images=NewImageList();
for (i=0; i < (ssize_t) GetImageListLength(images); i+=4)
{
cmyk_image=CloneImage(images,0,0,MagickTrue,exception);
if (cmyk_image == (Image *) NULL)
break;
if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse)
break;
(void) SetImageColorspace(cmyk_image,CMYKColorspace);
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
q->green=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
q->blue=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewAuthenticIndexQueue(cmyk_view);
for (x=0; x < (ssize_t) images->columns; x++)
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange-
GetPixelIntensity(images,p)));
p++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
AppendImageToList(&cmyk_images,cmyk_image);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
}
return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
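/*
  A hedged usage sketch (hypothetical values): extract a 200x100 region whose
  top-left corner is at (10,20) on the virtual canvas.

    RectangleInfo geometry;
    geometry.width=200;  geometry.height=100;
    geometry.x=10;       geometry.y=20;
    Image *cropped=CropImage(image,&geometry,exception);
*/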
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"
CacheView
*crop_view,
*image_view;
Image
*crop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
bounding_box,
page;
ssize_t
y;
/*
Check crop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
bounding_box=image->page;
if ((bounding_box.width == 0) || (bounding_box.height == 0))
{
bounding_box.width=image->columns;
bounding_box.height=image->rows;
}
page=(*geometry);
if (page.width == 0)
page.width=bounding_box.width;
if (page.height == 0)
page.height=bounding_box.height;
if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
((bounding_box.y-page.y) >= (ssize_t) page.height) ||
((page.x-bounding_box.x) > (ssize_t) image->columns) ||
((page.y-bounding_box.y) > (ssize_t) image->rows))
{
/*
Crop is not within virtual canvas, return 1 pixel transparent image.
*/
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
crop_image=CloneImage(image,1,1,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->background_color.opacity=(Quantum) TransparentOpacity;
(void) SetImageBackgroundColor(crop_image);
crop_image->page=bounding_box;
crop_image->page.x=(-1);
crop_image->page.y=(-1);
if (crop_image->dispose == BackgroundDispose)
crop_image->dispose=NoneDispose;
return(crop_image);
}
if ((page.x < 0) && (bounding_box.x >= 0))
{
page.width+=page.x-bounding_box.x;
page.x=0;
}
else
{
page.width-=bounding_box.x-page.x;
page.x-=bounding_box.x;
if (page.x < 0)
page.x=0;
}
if ((page.y < 0) && (bounding_box.y >= 0))
{
page.height+=page.y-bounding_box.y;
page.y=0;
}
else
{
page.height-=bounding_box.y-page.y;
page.y-=bounding_box.y;
if (page.y < 0)
page.y=0;
}
if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
page.width=image->columns-page.x;
if ((geometry->width != 0) && (page.width > geometry->width))
page.width=geometry->width;
if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
page.height=image->rows-page.y;
if ((geometry->height != 0) && (page.height > geometry->height))
page.height=geometry->height;
bounding_box.x+=page.x;
bounding_box.y+=page.y;
if ((page.width == 0) || (page.height == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
return((Image *) NULL);
}
/*
Initialize crop image attributes.
*/
crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->page.width=image->page.width;
crop_image->page.height=image->page.height;
if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t) image->page.width) ||
((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t) image->page.height))
{
crop_image->page.width=bounding_box.width;
crop_image->page.height=bounding_box.height;
}
crop_image->page.x=bounding_box.x;
crop_image->page.y=bounding_box.y;
/*
Crop image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
for (y=0; y < (ssize_t) crop_image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict crop_indexes;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
1,exception);
q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view);
(void) memcpy(q,p,(size_t) crop_image->columns*sizeof(*p));
if ((indexes != (IndexPacket *) NULL) &&
(crop_indexes != (IndexPacket *) NULL))
(void) memcpy(crop_indexes,indexes,(size_t) crop_image->columns*
sizeof(*crop_indexes));
if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CropImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
crop_view=DestroyCacheView(crop_view);
image_view=DestroyCacheView(image_view);
crop_image->type=image->type;
if (status == MagickFalse)
crop_image=DestroyImage(crop_image);
return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image into a possible list of tiles.
% This may include a single sub-region of the image. It applies all the
% normal geometry flags for Crop.
%
% The format of the CropImageToTiles method is:
%
% Image *CropImageToTiles(const Image *image,
% const char *crop_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to crop; the cropped tiles are returned as an image list.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
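/*
  A hedged usage sketch (hypothetical geometry strings): "128x128" cuts the
  image into fixed 128x128 tiles, while "4x3@" divides it into a 4x3 grid.

    Image *tiles=CropImageToTiles(image,"128x128",exception);
*/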
static inline double ConstrainPixelOffset(double x)
{
if (x < (double) -(SSIZE_MAX-512))
return((double) -(SSIZE_MAX-512));
if (x > (double) (SSIZE_MAX-512))
return((double) (SSIZE_MAX-512));
return(x);
}
static inline ssize_t PixelRoundOffset(double x)
{
/*
Round the fraction to nearest integer.
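For example, 2.4 -> 2 (since 0.4 < 0.6) and 2.5 -> 3 (ties go to ceil).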
*/
if ((x-floor(x)) < (ceil(x)-x))
return((ssize_t) floor(ConstrainPixelOffset(x)));
return((ssize_t) ceil(ConstrainPixelOffset(x)));
}
MagickExport Image *CropImageToTiles(const Image *image,
const char *crop_geometry,ExceptionInfo *exception)
{
Image
*next,
*crop_image;
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
crop_image=NewImageList();
next=NewImageList();
flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
if ((flags & AreaValue) != 0)
{
PointInfo
delta,
offset;
RectangleInfo
crop;
size_t
height,
width;
/*
Crop into NxM tiles (@ flag).
*/
width=image->columns;
height=image->rows;
if (geometry.width == 0)
geometry.width=1;
if (geometry.height == 0)
geometry.height=1;
if ((flags & AspectValue) == 0)
{
width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
else
{
width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
delta.x=(double) width/geometry.width;
delta.y=(double) height/geometry.height;
if (delta.x < 1.0)
delta.x=1.0;
if (delta.y < 1.0)
delta.y=1.0;
for (offset.y=0; offset.y < (double) height; )
{
if ((flags & AspectValue) == 0)
{
crop.y=PixelRoundOffset((MagickRealType) (offset.y-
(geometry.y > 0 ? 0 : geometry.y)));
offset.y+=delta.y; /* increment now to find width */
crop.height=(size_t) PixelRoundOffset((MagickRealType) (offset.y+
(geometry.y < 0 ? 0 : geometry.y)));
}
else
{
crop.y=PixelRoundOffset((MagickRealType) (offset.y-
(geometry.y > 0 ? geometry.y : 0)));
offset.y+=delta.y; /* increment now to find width */
crop.height=(size_t) PixelRoundOffset((MagickRealType) (offset.y+
(geometry.y < 0 ? geometry.y : 0)));
}
crop.height-=crop.y;
crop.y+=image->page.y;
for (offset.x=0; offset.x < (double) width; )
{
if ((flags & AspectValue) == 0)
{
crop.x=PixelRoundOffset((MagickRealType) (offset.x-
(geometry.x > 0 ? 0 : geometry.x)));
offset.x+=delta.x; /* increment now to find height */
crop.width=(size_t) PixelRoundOffset((MagickRealType) (offset.x+
(geometry.x < 0 ? 0 : geometry.x)));
}
else
{
crop.x=PixelRoundOffset((MagickRealType) (offset.x-
(geometry.x > 0 ? geometry.x : 0)));
offset.x+=delta.x; /* increment now to find height */
crop.width=(size_t) PixelRoundOffset((MagickRealType) (offset.x+
(geometry.x < 0 ? geometry.x : 0)));
}
crop.width-=crop.x;
crop.x+=image->page.x;
next=CropImage(image,&crop,exception);
if (next != (Image *) NULL)
AppendImageToList(&crop_image,next);
}
}
ClearMagickException(exception);
return(crop_image);
}
if (((geometry.width == 0) && (geometry.height == 0)) ||
((flags & XValue) != 0) || ((flags & YValue) != 0))
{
/*
Crop a single region at +X+Y.
*/
crop_image=CropImage(image,&geometry,exception);
if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
{
crop_image->page.width=geometry.width;
crop_image->page.height=geometry.height;
crop_image->page.x-=geometry.x;
crop_image->page.y-=geometry.y;
}
return(crop_image);
}
if ((image->columns > geometry.width) || (image->rows > geometry.height))
{
RectangleInfo
page;
size_t
height,
width;
ssize_t
x,
y;
/*
Crop into tiles of fixed size WxH.
*/
page=image->page;
if (page.width == 0)
page.width=image->columns;
if (page.height == 0)
page.height=image->rows;
width=geometry.width;
if (width == 0)
width=page.width;
height=geometry.height;
if (height == 0)
height=page.height;
next=NewImageList();
for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
{
for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
{
geometry.width=width;
geometry.height=height;
geometry.x=x;
geometry.y=y;
next=CropImage(image,&geometry,exception);
if (next == (Image *) NULL)
break;
AppendImageToList(&crop_image,next);
}
if (next == (Image *) NULL)
break;
}
return(crop_image);
}
return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to excerpt with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
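/*
  A hedged usage sketch (hypothetical values): like CropImage(), but the
  geometry is taken literally, with no virtual-canvas handling.

    RectangleInfo geometry={100,50,0,0};  // width, height, x, y
    Image *excerpt=ExcerptImage(image,&geometry,exception);
*/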
MagickExport Image *ExcerptImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"
CacheView
*excerpt_view,
*image_view;
Image
*excerpt_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Allocate excerpt image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
exception);
if (excerpt_image == (Image *) NULL)
return((Image *) NULL);
/*
Excerpt each row.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
for (y=0; y < (ssize_t) excerpt_image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict excerpt_indexes,
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
geometry->width,1,exception);
q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
(void) memcpy(q,p,(size_t) excerpt_image->columns*sizeof(*q));
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view);
if (excerpt_indexes != (IndexPacket *) NULL)
(void) memcpy(excerpt_indexes,indexes,(size_t)
excerpt_image->columns*sizeof(*excerpt_indexes));
}
if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
excerpt_view=DestroyCacheView(excerpt_view);
image_view=DestroyCacheView(image_view);
excerpt_image->type=image->type;
if (status == MagickFalse)
excerpt_image=DestroyImage(excerpt_image);
return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
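/*
  A hedged usage sketch (hypothetical values): pad the image to 1024x1024
  with the background color, placing the original 64 pixels in from the
  top-left corner (the composite offset is the negated geometry offset).

    RectangleInfo geometry;
    geometry.width=1024;  geometry.height=1024;
    geometry.x=(-64);     geometry.y=(-64);
    Image *extended=ExtentImage(image,&geometry,exception);
*/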
MagickExport Image *ExtentImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
Image
*extent_image;
MagickBooleanType
status;
/*
Allocate extent image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
exception);
if (extent_image == (Image *) NULL)
return((Image *) NULL);
(void) DeleteImageProfile(extent_image,"8bim"); /* delete clipping path */
status=SetImageBackgroundColor(extent_image);
if (status == MagickFalse)
{
InheritException(exception,&extent_image->exception);
extent_image=DestroyImage(extent_image);
return((Image *) NULL);
}
status=CompositeImage(extent_image,image->compose,image,-geometry->x,
-geometry->y);
if (status == MagickFalse)
{
InheritException(exception,&extent_image->exception);
extent_image=DestroyImage(extent_image);
return((Image *) NULL);
}
return(extent_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
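/*
  A minimal usage sketch (assumes a valid `image` and `exception`):

    Image *flipped=FlipImage(image,exception);
*/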
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"
CacheView
*flip_view,
*image_view;
Image
*flip_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
flip_image=CloneImage(image,0,0,MagickTrue,exception);
if (flip_image == (Image *) NULL)
return((Image *) NULL);
/*
Flip image.
*/
status=MagickTrue;
progress=0;
page=image->page;
image_view=AcquireVirtualCacheView(image,exception);
flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
for (y=0; y < (ssize_t) flip_image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict flip_indexes;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
1),flip_image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
(void) memcpy(q,p,(size_t) image->columns*sizeof(*q));
indexes=GetCacheViewVirtualIndexQueue(image_view);
if (indexes != (const IndexPacket *) NULL)
{
flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
if (flip_indexes != (IndexPacket *) NULL)
(void) memcpy(flip_indexes,indexes,(size_t) image->columns*
sizeof(*flip_indexes));
}
if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FlipImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
flip_view=DestroyCacheView(flip_view);
image_view=DestroyCacheView(image_view);
flip_image->type=image->type;
if (page.height != 0)
page.y=(ssize_t) (page.height-flip_image->rows-page.y);
flip_image->page=page;
if (status == MagickFalse)
flip_image=DestroyImage(flip_image);
return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
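/*
  A minimal usage sketch (assumes a valid `image` and `exception`):

    Image *flopped=FlopImage(image,exception);
*/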
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"
CacheView
*flop_view,
*image_view;
Image
*flop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
flop_image=CloneImage(image,0,0,MagickTrue,exception);
if (flop_image == (Image *) NULL)
return((Image *) NULL);
/*
Flop each row.
*/
status=MagickTrue;
progress=0;
page=image->page;
image_view=AcquireVirtualCacheView(image,exception);
flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
for (y=0; y < (ssize_t) flop_image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict flop_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
exception);
if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
q+=flop_image->columns;
indexes=GetCacheViewVirtualIndexQueue(image_view);
flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
for (x=0; x < (ssize_t) flop_image->columns; x++)
{
(*--q)=(*p++);
if ((indexes != (const IndexPacket *) NULL) &&
(flop_indexes != (IndexPacket *) NULL))
SetPixelIndex(flop_indexes+flop_image->columns-x-1,
GetPixelIndex(indexes+x));
}
if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FlopImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
flop_view=DestroyCacheView(flop_view);
image_view=DestroyCacheView(image_view);
flop_image->type=image->type;
if (page.width != 0)
page.x=(ssize_t) (page.width-flop_image->columns-page.x);
flop_image->page=page;
if (status == MagickFalse)
flop_image=DestroyImage(flop_image);
return(flop_image);
}
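/*
  Hedged usage sketch (illustrative, not part of this file): mirror an image
  with FlopImage().  It relies on standard MagickCore entry points
  (MagickCoreGenesis(), AcquireImageInfo(), ReadImage(), WriteImage()); the
  file names are placeholders.  Guarded out so this translation unit is
  unaffected.
*/
#if 0
#include <magick/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *flop_image,
    *image;

  ImageInfo
    *image_info;

  MagickCoreGenesis(argv[0],MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"input.png",MaxTextExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      flop_image=FlopImage(image,exception);
      image=DestroyImage(image);
      if (flop_image != (Image *) NULL)
        {
          (void) CopyMagickString(flop_image->filename,"output.png",
            MaxTextExtent);
          (void) WriteImage(image_info,flop_image);
          flop_image=DestroyImage(flop_image);
        }
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif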
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CopyImageRegion(Image *destination,
  const Image *source,const size_t columns,const size_t rows,
  const ssize_t sx,const ssize_t sy,const ssize_t dx,const ssize_t dy,
  ExceptionInfo *exception)
{
CacheView
*source_view,
*destination_view;
MagickBooleanType
status;
ssize_t
y;
if (columns == 0)
return(MagickTrue);
status=MagickTrue;
source_view=AcquireVirtualCacheView(source,exception);
destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,destination,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict destination_indexes;
register PixelPacket
*magick_restrict q;
/*
Transfer scanline.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source_view);
(void) memcpy(q,p,(size_t) columns*sizeof(*p));
if (indexes != (const IndexPacket *) NULL)
{
destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
if (destination_indexes != (IndexPacket *) NULL)
(void) memcpy(destination_indexes,indexes,(size_t)
columns*sizeof(*indexes));
}
sync=SyncCacheViewAuthenticPixels(destination_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
destination_view=DestroyCacheView(destination_view);
source_view=DestroyCacheView(source_view);
return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"
Image
*roll_image;
MagickStatusType
status;
RectangleInfo
offset;
/*
Initialize roll image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
roll_image=CloneImage(image,0,0,MagickTrue,exception);
if (roll_image == (Image *) NULL)
return((Image *) NULL);
offset.x=x_offset;
offset.y=y_offset;
while (offset.x < 0)
offset.x+=(ssize_t) image->columns;
while (offset.x >= (ssize_t) image->columns)
offset.x-=(ssize_t) image->columns;
while (offset.y < 0)
offset.y+=(ssize_t) image->rows;
while (offset.y >= (ssize_t) image->rows)
offset.y-=(ssize_t) image->rows;
/*
Roll image.
*/
status=CopyImageRegion(roll_image,image,(size_t) offset.x,
(size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
offset.y,0,0,exception);
(void) SetImageProgress(image,RollImageTag,0,3);
status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
(size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
exception);
(void) SetImageProgress(image,RollImageTag,1,3);
status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
(void) SetImageProgress(image,RollImageTag,2,3);
status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
offset.y,0,0,offset.x,offset.y,exception);
(void) SetImageProgress(image,RollImageTag,3,3);
roll_image->type=image->type;
if (status == MagickFalse)
roll_image=DestroyImage(roll_image);
return(roll_image);
}
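/*
  Worked example (illustrative): offsets wrap around, so on a 640x480 image

    roll_image=RollImage(image,-100,700,exception);

  normalizes via the loops above to offset.x = -100+640 = 540 and
  offset.y = 700-480 = 220, and the four CopyImageRegion() calls reassemble
  the quadrants at the new origin.
*/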
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
const RectangleInfo *shave_info,ExceptionInfo *exception)
{
Image
*shave_image;
RectangleInfo
geometry;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (((2*shave_info->width) >= image->columns) ||
((2*shave_info->height) >= image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
SetGeometry(image,&geometry);
geometry.width-=2*shave_info->width;
geometry.height-=2*shave_info->height;
geometry.x=(ssize_t) shave_info->width+image->page.x;
geometry.y=(ssize_t) shave_info->height+image->page.y;
shave_image=CropImage(image,&geometry,exception);
if (shave_image == (Image *) NULL)
return((Image *) NULL);
shave_image->page.width-=2*shave_info->width;
shave_image->page.height-=2*shave_info->height;
shave_image->page.x-=(ssize_t) shave_info->width;
shave_image->page.y-=(ssize_t) shave_info->height;
return(shave_image);
}
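/*
  Worked example (illustrative): with shave_info = {width=10, height=20} on
  a 640x480 image, the crop geometry is 620x440 at (10+page.x, 20+page.y),
  and the resulting page is shrunk by 20 columns and 40 rows with its origin
  pulled back by the shave amounts.
*/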
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
CacheView
*image_view,
*splice_view;
Image
*splice_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
splice_geometry;
ssize_t
columns,
y;
/*
Allocate splice image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
splice_geometry=(*geometry);
splice_image=CloneImage(image,image->columns+splice_geometry.width,
image->rows+splice_geometry.height,MagickTrue,exception);
if (splice_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse)
{
InheritException(exception,&splice_image->exception);
splice_image=DestroyImage(splice_image);
return((Image *) NULL);
}
(void) SetImageBackgroundColor(splice_image);
/*
Respect image geometry.
*/
switch (image->gravity)
{
default:
case UndefinedGravity:
case NorthWestGravity:
break;
case NorthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
break;
}
case NorthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
break;
}
case WestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case StaticGravity:
case CenterGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case EastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case SouthWestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
}
/*
Splice image.
*/
status=MagickTrue;
progress=0;
columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
for (y=0; y < (ssize_t) splice_geometry.y; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes,
*magick_restrict splice_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
for (x=0; x < columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q++;
for ( ; x < (ssize_t) splice_image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SpliceImageTag,progress,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_image->rows,1)
#endif
for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
y < (ssize_t) splice_image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes,
*magick_restrict splice_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
if ((y < 0) || (y >= (ssize_t) splice_image->rows))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
splice_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
for (x=0; x < columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q++;
for ( ; x < (ssize_t) splice_image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SpliceImageTag,progress,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
splice_view=DestroyCacheView(splice_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
splice_image=DestroyImage(splice_image);
return(splice_image);
}
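/*
  Worked example (illustrative): with geometry = {x=100, y=50, width=20,
  height=30} and undefined gravity, a 640x480 image becomes 660x510.  Rows
  50..79 and columns 100..119 of the result hold the background color (set
  by SetImageBackgroundColor() above); source pixels at or right of column
  100 shift right by 20, and those at or below row 50 shift down by 30.
*/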
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image. The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
/*
DANGER: This function destroys what it assumes to be a single image list.
If the input image is part of a larger list, all other images in that list
are simply 'lost', not destroyed.
Also, if the crop generates a list of images, only the first image is
resized.  And finally, if the crop succeeds but the resize fails, you will
get a cropped image, as well as a 'false' or 'failed' report.
This function should probably be deprecated in favor of direct calls to
CropImageToTiles() or ResizeImage(), as appropriate.
*/
MagickExport MagickBooleanType TransformImage(Image **image,
const char *crop_geometry,const char *image_geometry)
{
Image
*resize_image,
*transform_image;
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image **) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
transform_image=(*image);
if (crop_geometry != (const char *) NULL)
{
Image
*crop_image;
/*
Crop image to a user specified size.
*/
crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception);
if (crop_image == (Image *) NULL)
transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception);
else
{
transform_image=DestroyImage(transform_image);
transform_image=GetFirstImageInList(crop_image);
}
*image=transform_image;
}
if (image_geometry == (const char *) NULL)
return(MagickTrue);
/*
Scale image to a user specified size.
*/
flags=ParseRegionGeometry(transform_image,image_geometry,&geometry,
&(*image)->exception);
(void) flags;
if ((transform_image->columns == geometry.width) &&
(transform_image->rows == geometry.height))
return(MagickTrue);
resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
transform_image->filter,transform_image->blur,&(*image)->exception);
if (resize_image == (Image *) NULL)
return(MagickFalse);
transform_image=DestroyImage(transform_image);
transform_image=resize_image;
*image=transform_image;
return(MagickTrue);
}
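/*
  Hedged usage sketch (illustrative): crop to a 100x100 tile at +10+10 and
  scale the (first) result to 50x50.  The geometry strings are hypothetical
  examples; `image` is assumed to be a valid single-image handle.
*/
#if 0
  if (TransformImage(&image,"100x100+10+10","50x50") == MagickFalse)
    {
      /* per the note above, image may still point at a cropped result */
    }
#endif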
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImages() calls TransformImage() on each image of a sequence.
%
% The format of the TransformImages method is:
%
% MagickBooleanType TransformImages(Image **image,
% const char *crop_geometry,const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image. The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
MagickExport MagickBooleanType TransformImages(Image **images,
const char *crop_geometry,const char *image_geometry)
{
Image
*image,
**image_list,
*transform_images;
MagickStatusType
status;
register ssize_t
i;
assert(images != (Image **) NULL);
assert((*images)->signature == MagickCoreSignature);
if ((*images)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
(*images)->filename);
image_list=ImageListToArray(*images,&(*images)->exception);
if (image_list == (Image **) NULL)
return(MagickFalse);
status=MagickTrue;
transform_images=NewImageList();
for (i=0; image_list[i] != (Image *) NULL; i++)
{
image=image_list[i];
status&=TransformImage(&image,crop_geometry,image_geometry);
AppendImageToList(&transform_images,image);
}
*images=transform_images;
image_list=(Image **) RelinquishMagickMemory(image_list);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"
CacheView
*image_view,
*transpose_view;
Image
*transpose_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transpose_image == (Image *) NULL)
return((Image *) NULL);
/*
Transpose image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transpose_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict transpose_indexes,
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
0,1,transpose_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
(void) memcpy(q,p,(size_t) image->columns*sizeof(*q));
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view);
if (transpose_indexes != (IndexPacket *) NULL)
(void) memcpy(transpose_indexes,indexes,(size_t)
image->columns*sizeof(*transpose_indexes));
}
if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransposeImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transpose_view=DestroyCacheView(transpose_view);
image_view=DestroyCacheView(image_view);
transpose_image->type=image->type;
page=transpose_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
transpose_image->page=page;
if (status == MagickFalse)
transpose_image=DestroyImage(transpose_image);
return(transpose_image);
}
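/*
  Note (illustrative): the loop above implements a true matrix transpose;
  destination pixel (x,y) equals source pixel (y,x), which is why the page
  width/height and x/y offsets are swapped.
*/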
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"
CacheView
*image_view,
*transverse_view;
Image
*transverse_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transverse_image == (Image *) NULL)
return((Image *) NULL);
/*
Transverse image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transverse_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict transverse_indexes,
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-
1),0,1,transverse_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
q+=image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
*--q=(*p++);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view);
if (transverse_indexes != (IndexPacket *) NULL)
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(transverse_indexes+image->columns-x-1,
GetPixelIndex(indexes+x));
}
sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transverse_view=DestroyCacheView(transverse_view);
image_view=DestroyCacheView(image_view);
transverse_image->type=image->type;
page=transverse_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
transverse_image->page=page;
if (status == MagickFalse)
transverse_image=DestroyImage(transverse_image);
return(transverse_image);
}
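/*
  Note (illustrative): transverse is the transpose about the anti-diagonal;
  destination pixel (x,y) equals source pixel (columns-1-y, rows-1-x), hence
  the swapped-and-mirrored page adjustments above.
*/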
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
RectangleInfo
geometry;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
geometry=GetImageBoundingBox(image,exception);
if ((geometry.width == 0) || (geometry.height == 0))
{
Image
*crop_image;
crop_image=CloneImage(image,1,1,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->background_color.opacity=(Quantum) TransparentOpacity;
(void) SetImageBackgroundColor(crop_image);
crop_image->page=image->page;
crop_image->page.x=(-1);
crop_image->page.y=(-1);
return(crop_image);
}
geometry.x+=image->page.x;
geometry.y+=image->page.y;
return(CropImage(image,&geometry,exception));
}
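/*
  Note (illustrative): when GetImageBoundingBox() finds nothing to keep
  (e.g., a uniformly-colored image), TrimImage() returns a 1x1 transparent
  pixel carrying the original page with offset (-1,-1); otherwise it is a
  CropImage() to the bounding box shifted by the existing page offset.
*/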
|
pwsafe_fmt_plug.c | /* Password Safe and Password Gorilla cracker patch for JtR. Hacked together
* during May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* Optimization patch during January of 2013 by Brian Wallace <brian.wallace9809 at gmail.com>.
*
* This software is Copyright (c) 2012-2013
* Dhiru Kholia <dhiru.kholia at gmail.com> and Brian Wallace <brian.wallace9809 at gmail.com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pwsafe;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pwsafe);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
//#undef SIMD_COEF_32
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1 // tuned on core i7
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "pwsafe"
#define FORMAT_NAME "Password Safe"
#define FORMAT_TAG "$pwsafe$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA256 " SHA256_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 32
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#if ARCH_LITTLE_ENDIAN==1
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
#else
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
#endif
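// GETPOS maps byte i of key `index` into the interleaved SIMD buffer: lanes
// are packed four bytes wide inside each 32-bit word, words are strided by
// SIMD_COEF_32, and on little-endian hosts bytes are flipped to the
// big-endian order SHA-256 expects.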
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests pwsafe_tests[] = {
{"$pwsafe$*3*fefc1172093344c9d5577b25f5b4b6e5d2942c94f9fc24c21733e28ae6527521*2048*88cbaf7d8668c1a98263f5dce7cb39c3304c49a3e0d76a7ea475dc02ab2f97a7", "12345678"},
{"$pwsafe$*3*581cd1135b9b993ccb0f6b01c1fcfacd799c69960496c96286f94fe1400c1b25*2048*4ab3c2d3af251e94eb2f753fdf30fb9da074bec6bac0fa9d9d152b95fc5795c6", "openwall"},
{"$pwsafe$*3*34ba0066d0fc594c126b60b9db98b6024e1cf585901b81b5b005ce386f173d4c*2048*cc86f1a5d930ff19b3602770a86586b5d9dea7bb657012aca875aa2a7dc71dc0", "12345678901234567890123"},
{"$pwsafe$*3*a42431191707895fb8d1121a3a6e255e33892d8eecb50fc616adab6185b5affb*2048*0f71d12df2b7c5394ae90771f6475a7ad0437007a8eeb5d9b58e35d8fd57c827", "123456789012345678901234567"},
{"$pwsafe$*3*c380dee0dbb536f5454f78603b020be76b33e294e9c2a0e047f43b9c61669fc8*2048*e88ed54a85e419d555be219d200563ae3ba864e24442826f412867fc0403917d", "this is an 87 character password to test the max bound of pwsafe-opencl................"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static struct custom_salt {
int version;
unsigned int iterations;
unsigned char salt[32];
} *cur_salt;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
// format: $pwsafe$*version*salt*iterations*hash
char *p;
char *ctcopy;
char *keeptr;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN; /* skip over "$pwsafe$*" */
if ((p = strtokm(ctcopy, "*")) == NULL) /* version */
goto err;
if (!isdec(p))
goto err;
if (!atoi(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt */
goto err;
if (strlen(p) < 64)
goto err;
if (strspn(p, HEXCHARS_lc) != 64)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
goto err;
if (!isdec(p))
goto err;
if (!atoi(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* hash */
goto err;
if (strlen(p) != 64)
goto err;
if (strspn(p, HEXCHARS_lc) != 64)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
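/*
 * Example (first entry of pwsafe_tests above), split on '*':
 *   $pwsafe$*3*fefc1172...527521*2048*88cbaf7d...2f97a7
 *   version=3, salt=64 hex chars (32 bytes), iterations=2048,
 *   hash=64 hex chars (SHA-256 digest).
 */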
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
int i;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += FORMAT_TAG_LEN; /* skip over "$pwsafe$*" */
p = strtokm(ctcopy, "*");
cs.version = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < 32; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.iterations = (unsigned int)atoi(p);
MEM_FREE(keeptr);
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = strrchr(ciphertext, '*') + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
#ifndef SIMD_COEF_32
#define rotl(x,y) ( x<<y | x>>(32-y) )
#define rotr(x,y) ( x>>y | x<<(32-y) )
#define CHOICE(x,y,z) ( z ^ (x & ( y ^ z)) )
#define MAJORITY(x,y,z) ( (x & y) | (z & (x | y)) )
#define ROTXOR1(x) (rotr(x,2) ^ rotr(x,13) ^ rotr(x,22))
#define ROTXOR2(x) (rotr(x,6) ^ rotr(x,11) ^ rotr(x,25))
#define ROTXOR3(x) (rotr(x,7) ^ rotr(x,18) ^ (x>>3))
#define ROTXOR4(x) (rotr(x,17) ^ rotr(x,19) ^ (x>>10))
#if ARCH_LITTLE_ENDIAN
#define bytereverse(x) ( ((x) << 24) | (((x) << 8) & 0x00ff0000) | (((x) >> 8) & 0x0000ff00) | ((x) >> 24) )
#else
#define bytereverse(x) (x)
#endif
static void pwsafe_sha256_iterate(unsigned int * state, unsigned int iterations)
{
unsigned int word00,word01,word02,word03,word04,word05,word06,word07;
unsigned int word08,word09,word10,word11,word12,word13,word14,word15;
unsigned int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
iterations++;
word00 = state[0];
word01 = state[1];
word02 = state[2];
word03 = state[3];
word04 = state[4];
word05 = state[5];
word06 = state[6];
word07 = state[7];
while(iterations)
{
iterations--;
temp0 = 0x6a09e667UL;
temp1 = 0xbb67ae85UL;
temp2 = 0x3c6ef372UL;
temp3 = 0xa54ff53aUL;
temp4 = 0x510e527fUL;
temp5 = 0x9b05688cUL;
temp6 = 0x1f83d9abUL;
temp7 = 0x5be0cd19UL;
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x428a2f98 + (word00);
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x71374491 + (word01);
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0xb5c0fbcf + (word02);
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0xe9b5dba5 + (word03);
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x3956c25b + (word04);
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x59f111f1 + (word05);
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x923f82a4 + (word06);
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0xab1c5ed5 + (word07);
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0xd807aa98 + ( (word08 = 0x80000000U) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x12835b01 + ( (word09 = 0) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x243185be + ( (word10 = 0) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x550c7dc3 + ( (word11 = 0) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x72be5d74 + ( (word12 = 0) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x80deb1fe + ( (word13 = 0) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x9bdc06a7 + ( (word14 = 0) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0xc19bf174 + ( (word15 = 256) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0xe49b69c1 + ( (word00 += ROTXOR4( word14 ) + word09 + ROTXOR3( word01 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0xefbe4786 + ( (word01 += ROTXOR4( word15 ) + word10 + ROTXOR3( word02 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x0fc19dc6 + ( (word02 += ROTXOR4( word00 ) + word11 + ROTXOR3( word03 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x240ca1cc + ( (word03 += ROTXOR4( word01 ) + word12 + ROTXOR3( word04 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x2de92c6f + ( (word04 += ROTXOR4( word02 ) + word13 + ROTXOR3( word05 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x4a7484aa + ( (word05 += ROTXOR4( word03 ) + word14 + ROTXOR3( word06 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x5cb0a9dc + ( (word06 += ROTXOR4( word04 ) + word15 + ROTXOR3( word07 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x76f988da + ( (word07 += ROTXOR4( word05 ) + word00 + ROTXOR3( word08 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x983e5152 + ( (word08 += ROTXOR4( word06 ) + word01 + ROTXOR3( word09 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0xa831c66d + ( (word09 += ROTXOR4( word07 ) + word02 + ROTXOR3( word10 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0xb00327c8 + ( (word10 += ROTXOR4( word08 ) + word03 + ROTXOR3( word11 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0xbf597fc7 + ( (word11 += ROTXOR4( word09 ) + word04 + ROTXOR3( word12 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0xc6e00bf3 + ( (word12 += ROTXOR4( word10 ) + word05 + ROTXOR3( word13 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0xd5a79147 + ( (word13 += ROTXOR4( word11 ) + word06 + ROTXOR3( word14 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x06ca6351 + ( (word14 += ROTXOR4( word12 ) + word07 + ROTXOR3( word15 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x14292967 + ( (word15 += ROTXOR4( word13 ) + word08 + ROTXOR3( word00 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x27b70a85 + ( (word00 += ROTXOR4( word14 ) + word09 + ROTXOR3( word01 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x2e1b2138 + ( (word01 += ROTXOR4( word15 ) + word10 + ROTXOR3( word02 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x4d2c6dfc + ( (word02 += ROTXOR4( word00 ) + word11 + ROTXOR3( word03 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x53380d13 + ( (word03 += ROTXOR4( word01 ) + word12 + ROTXOR3( word04 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x650a7354 + ( (word04 += ROTXOR4( word02 ) + word13 + ROTXOR3( word05 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x766a0abb + ( (word05 += ROTXOR4( word03 ) + word14 + ROTXOR3( word06 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x81c2c92e + ( (word06 += ROTXOR4( word04 ) + word15 + ROTXOR3( word07 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x92722c85 + ( (word07 += ROTXOR4( word05 ) + word00 + ROTXOR3( word08 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0xa2bfe8a1 + ( (word08 += ROTXOR4( word06 ) + word01 + ROTXOR3( word09 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0xa81a664b + ( (word09 += ROTXOR4( word07 ) + word02 + ROTXOR3( word10 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0xc24b8b70 + ( (word10 += ROTXOR4( word08 ) + word03 + ROTXOR3( word11 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0xc76c51a3 + ( (word11 += ROTXOR4( word09 ) + word04 + ROTXOR3( word12 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0xd192e819 + ( (word12 += ROTXOR4( word10 ) + word05 + ROTXOR3( word13 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0xd6990624 + ( (word13 += ROTXOR4( word11 ) + word06 + ROTXOR3( word14 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0xf40e3585 + ( (word14 += ROTXOR4( word12 ) + word07 + ROTXOR3( word15 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x106aa070 + ( (word15 += ROTXOR4( word13 ) + word08 + ROTXOR3( word00 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x19a4c116 + ( (word00 += ROTXOR4( word14 ) + word09 + ROTXOR3( word01 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x1e376c08 + ( (word01 += ROTXOR4( word15 ) + word10 + ROTXOR3( word02 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x2748774c + ( (word02 += ROTXOR4( word00 ) + word11 + ROTXOR3( word03 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x34b0bcb5 + ( (word03 += ROTXOR4( word01 ) + word12 + ROTXOR3( word04 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x391c0cb3 + ( (word04 += ROTXOR4( word02 ) + word13 + ROTXOR3( word05 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x4ed8aa4a + ( (word05 += ROTXOR4( word03 ) + word14 + ROTXOR3( word06 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x5b9cca4f + ( (word06 += ROTXOR4( word04 ) + word15 + ROTXOR3( word07 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x682e6ff3 + ( (word07 += ROTXOR4( word05 ) + word00 + ROTXOR3( word08 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x748f82ee + ( (word08 += ROTXOR4( word06 ) + word01 + ROTXOR3( word09 ) ) );
temp3 += temp7;
temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 );
temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x78a5636f + ( (word09 += ROTXOR4( word07 ) + word02 + ROTXOR3( word10 ) ) );
temp2 += temp6;
temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 );
temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x84c87814 + ( (word10 += ROTXOR4( word08 ) + word03 + ROTXOR3( word11 ) ) );
temp1 += temp5;
temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 );
temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x8cc70208 + ( (word11 += ROTXOR4( word09 ) + word04 + ROTXOR3( word12 ) ) );
temp0 += temp4;
temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 );
temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x90befffa + ( (word12 += ROTXOR4( word10 ) + word05 + ROTXOR3( word13 ) ) );
temp7 += temp3;
temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 );
temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0xa4506ceb + ( (word13 += ROTXOR4( word11 ) + word06 + ROTXOR3( word14 ) ) );
temp6 += temp2;
temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 );
temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0xbef9a3f7 + ( (word14 += ROTXOR4( word12 ) + word07 + ROTXOR3( word15 ) ) );
temp5 += temp1;
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 );
temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0xc67178f2 + ( (word15 += ROTXOR4( word13 ) + word08 + ROTXOR3( word00 ) ) );
temp4 += temp0;
temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 );
word00 = 0x6a09e667UL + temp0;
word01 = 0xbb67ae85UL + temp1;
word02 = 0x3c6ef372UL + temp2;
word03 = 0xa54ff53aUL + temp3;
word04 = 0x510e527fUL + temp4;
word05 = 0x9b05688cUL + temp5;
word06 = 0x1f83d9abUL + temp6;
word07 = 0x5be0cd19UL + temp7;
}
state[0] = bytereverse(word00);
state[1] = bytereverse(word01);
state[2] = bytereverse(word02);
state[3] = bytereverse(word03);
state[4] = bytereverse(word04);
state[5] = bytereverse(word05);
state[6] = bytereverse(word06);
state[7] = bytereverse(word07);
}
#endif
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index+=MAX_KEYS_PER_CRYPT)
{
SHA256_CTX ctx;
#ifdef SIMD_COEF_32
unsigned int i;
unsigned char _IBuf[64*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys, tmpBuf[32];
uint32_t *keys32, j;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
keys32 = (uint32_t*)keys;
memset(keys, 0, 64*MAX_KEYS_PER_CRYPT);
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i]));
SHA256_Update(&ctx, cur_salt->salt, 32);
SHA256_Final(tmpBuf, &ctx);
for (j = 0; j < 32; ++j)
keys[GETPOS(j, i)] = tmpBuf[j];
keys[GETPOS(j, i)] = 0x80;
// 32 bytes of crypt data (0x100 bits).
keys[GETPOS(62, i)] = 0x01;
}
for (i = 0; i < cur_salt->iterations; i++) {
SIMDSHA256body(keys, keys32, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
}
// Last one with FLAT_OUT
SIMDSHA256body(keys, crypt_out[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT);
#else
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_key[index], strlen(saved_key[index]));
SHA256_Update(&ctx, cur_salt->salt, 32);
SHA256_Final((unsigned char*)crypt_out[index], &ctx);
#if 1
// This complex crap only boosted speed on my quad-HT from 5016 to 5285.
// A ton of complex code for VERY little gain. The SIMD change gave us
// a 4x improvement with very little change. This pwsafe_sha256_iterate
// does get 5% gain, but 400% is so much better, lol. I put the other
// code in to be able to dump data out easier, getting dump_stuff()
// data in flat, to be able to help get the SIMD code working.
#ifdef COMMON_DIGEST_FOR_OPENSSL
pwsafe_sha256_iterate(ctx.hash, cur_salt->iterations);
memcpy(crypt_out[index], ctx.hash, 32);
#else
pwsafe_sha256_iterate(ctx.h, cur_salt->iterations);
memcpy(crypt_out[index], ctx.h, 32);
#endif
#else
{ int i;
for (i = 0; i <= cur_salt->iterations; ++i) {
SHA256_Init(&ctx);
SHA256_Update(&ctx, (unsigned char*)crypt_out[index], 32);
SHA256_Final((unsigned char*)crypt_out[index], &ctx);
} }
#endif
#endif
}
return count;
}
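/*
 * Hedged scalar sketch of the stretching scheme crypt_all() implements,
 * written against OpenSSL's SHA256 API (which this plugin reaches through
 * "sha2.h").  The helper name is hypothetical; note the <= bound, matching
 * the extra round folded into pwsafe_sha256_iterate() via iterations++.
 */
#if 0
static void pwsafe_stretch_sketch(const char *pass,
	const unsigned char salt[32], unsigned int iterations,
	unsigned char out[32])
{
	SHA256_CTX ctx;
	unsigned int i;

	/* state = SHA256(password || salt) */
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, pass, strlen(pass));
	SHA256_Update(&ctx, salt, 32);
	SHA256_Final(out, &ctx);
	/* then iterations+1 rounds of state = SHA256(state) */
	for (i = 0; i <= iterations; i++) {
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, out, 32);
		SHA256_Final(out, &ctx);
	}
}
#endif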
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void pwsafe_set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
static char *get_key(int index)
{
return saved_key[index];
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations;
}
struct fmt_main fmt_pwsafe = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
{ FORMAT_TAG },
pwsafe_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
pwsafe_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
scheduled-clause.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
int main(int argc, char **argv) {
int i, n=200,chunk,a[n],suma=0;
  if (argc < 3) {
    fprintf(stderr,"\nMissing arguments: iterations and chunk size\n");
    exit(-1);
  }
  n = atoi(argv[1]);
  if (n > 200) n = 200;  /* a[] is declared with room for 200 elements */
  chunk = atoi(argv[2]);
for (i=0; i<n; i++) a[i] = i;
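  /* firstprivate(suma) starts each thread's private copy at 0;
     lastprivate(suma) copies back the value left by the thread that runs
     the sequentially-last iteration, so the total printed below is that
     thread's partial sum, not the sum of the whole array. */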
#pragma omp parallel for firstprivate(suma) \
lastprivate(suma) schedule(dynamic,chunk)
  for (i=0; i<n; i++) {
    suma = suma + a[i];
    printf(" thread %d suma a[%d]=%d suma=%d \n",
           omp_get_thread_num(),i,a[i],suma);
  }
printf("Fuera de 'parallel for' suma=%d\n",suma);
return(0);
}
|
openmp.c | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <omp.h>
#define PRINTABLE_TIME(_x) ((long double) (clock() - _x)/CLOCKS_PER_SEC)
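/* Note: on POSIX systems clock() measures CPU time summed over all threads,
   so PRINTABLE_TIME will not show parallel speedup; omp_get_wtime() is the
   usual wall-clock alternative. */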
int
main(int argc, char const *argv[])
{
size_t N, NUM_THREADS, i, sec_p = 0, p = 0, *A;
clock_t cl;
if (argc < 3)
{
printf("Faltan argumentos: N dimension del arreglo. \n");
exit(-1);
}
N = atoi(argv[1]);
NUM_THREADS = atoi(argv[2]);
omp_set_num_threads(NUM_THREADS);
A = (size_t*) malloc(sizeof(size_t) * N);
#pragma omp parallel for shared(A) private(i)
for (i = 0; i < N; i++)
{
A[i] = i;
}
cl = clock();
#pragma omp parallel for private(i) reduction(+:p)
for (i = 0; i < N; i++)
{
p += !(A[i] % 2);
}
printf("Time: %Lfs\n", PRINTABLE_TIME(cl));
for (i = 0; i < N; i++)
{
sec_p += !(A[i] % 2);
}
if (sec_p != p) {
printf("Failure\n");
exit(-1);
}
printf("Success!\n");
return 0;
}
|
rkb_screen.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
#define LL 0
#define SS 1
#define SL 2
#define LS 3
int int2e_spinor();
int int2e_spsp1spsp2_spinor();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
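/*
 * The prescreen functions below apply Cauchy-Schwarz screening:
 *   |(ij|kl)| <= sqrt((ij|ij)) * sqrt((kl|kl)) = q_cond[i,j] * q_cond[k,l]
 * (q_cond holds the square root of the largest |(ij|ij)| element; see
 * set_qcond below).  A shell quartet survives only if this bound times the
 * largest relevant density-matrix element can exceed direct_scf_cutoff.
 */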
int CVHFrkbllll_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((opt->dm_cond[j*n+i] > dmin)
|| (opt->dm_cond[l*n+k] > dmin)
|| (opt->dm_cond[j*n+k] > dmin)
|| (opt->dm_cond[j*n+l] > dmin)
|| (opt->dm_cond[i*n+k] > dmin)
|| (opt->dm_cond[i*n+l] > dmin));
}
int CVHFrkbllll_vkscreen(int *shls, CVHFOpt *opt,
double **dms_cond, int n_dm, double *dm_atleast,
int *atm, int *bas, double *env)
{
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int nbas = opt->nbas;
int idm;
double qijkl = opt->q_cond[i*nbas+j] * opt->q_cond[k*nbas+l];
double *pdmscond = opt->dm_cond + nbas*nbas;
for (idm = 0; idm < (n_dm+1)/2; idm++) {
// note in _vhf.rdirect_mapdm, J and K share the same DM
dms_cond[idm*2+0] = pdmscond + idm*nbas*nbas; // for vj
dms_cond[idm*2+1] = pdmscond + idm*nbas*nbas; // for vk
}
*dm_atleast = opt->direct_scf_cutoff / qijkl;
return 1;
}
int CVHFrkbssll_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double *dmsl = opt->dm_cond + n*n*SL;
double qijkl = opt->q_cond[n*n*SS+i*n+j] * opt->q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((opt->dm_cond[n*n*SS+j*n+i] > dmin)
|| (opt->dm_cond[l*n+k] > dmin)
|| (dmsl[j*n+k] > dmin)
|| (dmsl[j*n+l] > dmin)
|| (dmsl[i*n+k] > dmin)
|| (dmsl[i*n+l] > dmin));
}
// be careful with the order in dms_cond, the current order (dmll, dmss, dmsl)
// is consistent to the function _call_veff_ssll in dhf.py
int CVHFrkbssll_vkscreen(int *shls, CVHFOpt *opt,
double **dms_cond, int n_dm, double *dm_atleast,
int *atm, int *bas, double *env)
{
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int nbas = opt->nbas;
int idm;
double qijkl = opt->q_cond[nbas*nbas*SS+i*nbas+j] * opt->q_cond[k*nbas+l];
double *pdmscond = opt->dm_cond + 4*nbas*nbas;
int nset = (n_dm+2) / 3;
double *dmscondll = pdmscond + nset*nbas*nbas*LL;
double *dmscondss = pdmscond + nset*nbas*nbas*SS;
double *dmscondsl = pdmscond + nset*nbas*nbas*SL;
for (idm = 0; idm < nset; idm++) {
dms_cond[nset*0+idm] = dmscondll + idm*nbas*nbas;
dms_cond[nset*1+idm] = dmscondss + idm*nbas*nbas;
dms_cond[nset*2+idm] = dmscondsl + idm*nbas*nbas;
}
*dm_atleast = opt->direct_scf_cutoff / qijkl;
return 1;
}
static void set_qcond(int (*intor)(), CINTOpt *cintopt, double *qcond,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel
{
double qtmp, tmp;
int i, j, ij, di, dj, ish, jsh;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = ao_loc[ish+1] - ao_loc[ish];
di = MAX(di, dj);
}
double complex *buf = malloc(sizeof(double complex) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
jsh = ij - ish*(ish+1)/2;
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
qtmp = 1e-100;
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
tmp = cabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
qtmp = MAX(qtmp, tmp);
} }
qtmp = sqrt(qtmp);
}
qcond[ish*nbas+jsh] = qtmp;
qcond[jsh*nbas+ish] = qtmp;
}
free(buf);
free(cache);
}
}
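/* In the loop above the flat index ij enumerates the lower triangle as
 * ij = ish*(ish+1)/2 + jsh with 0 <= jsh <= ish; inverting the quadratic
 * gives ish = floor(sqrt(2*ij + 1/4) - 1/2), where the 1e-7 guards against
 * the square root rounding down at exact triangular numbers.  A small
 * self-check (check_tril_index_sketch is a hypothetical helper, for
 * illustration only):
 */
static void check_tril_index_sketch(int nbas)
{
        int ij = 0;
        int ish, jsh;
        for (ish = 0; ish < nbas; ish++) {
        for (jsh = 0; jsh <= ish; jsh++, ij++) {
                int ish2 = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                int jsh2 = ij - ish2*(ish2+1)/2;
                assert(ish2 == ish && jsh2 == jsh);
        } }
}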
void CVHFrkbllll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
assert(intor == &int2e_spinor);
set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
}
void CVHFrkbssss_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
assert(intor == &int2e_spsp1spsp2_spinor);
set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
}
void CVHFrkbssll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas*2);
set_qcond(&int2e_spinor, NULL, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
set_qcond(&int2e_spsp1spsp2_spinor, NULL, opt->q_cond+nbas*nbas, ao_loc,
atm, natm, bas, nbas, env);
}
static void set_dmcond(double *dmcond, double *dmscond, double complex *dm,
double direct_scf_cutoff, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
const size_t nao = ao_loc[nbas];
double dmax, dmaxi, tmp;
int i, j, ish, jsh;
int iset;
double complex *pdm;
for (ish = 0; ish < nbas; ish++) {
for (jsh = 0; jsh <= ish; jsh++) {
dmax = 0;
for (iset = 0; iset < nset; iset++) {
dmaxi = 0;
pdm = dm + nao*nao*iset;
for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
tmp = .5 * (cabs(pdm[i*nao+j]) + cabs(pdm[j*nao+i]));
dmaxi = MAX(dmaxi, tmp);
} }
dmscond[iset*nbas*nbas+ish*nbas+jsh] = dmaxi;
dmscond[iset*nbas*nbas+jsh*nbas+ish] = dmaxi;
dmax = MAX(dmax, dmaxi);
}
dmcond[ish*nbas+jsh] = dmax;
dmcond[jsh*nbas+ish] = dmax;
} }
}
// opt->dm_cond layout: one nbas*nbas dm_cond block followed by nset dms_cond
// blocks, i.e. 1+nset blocks in total
void CVHFrkbllll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->dm_cond) { // NOT reuse opt->dm_cond because nset may be diff in different call
free(opt->dm_cond);
}
opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*(1+nset));
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*(1+nset));
// dmcond is followed by dmscond, which holds the max matrix element per
// shell pair for each dm
set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
void CVHFrkbssss_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->dm_cond) {
free(opt->dm_cond);
}
opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*(1+nset));
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*(1+nset));
set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
// the current order of dmscond (dmll, dmss, dmsl) is consistent to the
// function _call_veff_ssll in dhf.py
void CVHFrkbssll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->dm_cond) {
free(opt->dm_cond);
}
if (nset < 3) {
fprintf(stderr, "At least 3 sets of DMs (dmll,dmss,dmsl) are "
"required to set rkb prescreening\n");
exit(1);
}
nset = nset / 3;
opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*4*(1+nset));
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*4*(1+nset));
        // 4 types of dmcond (LL,SS,SL,LS) followed by 4 types of dmscond
int n2c = CINTtot_cgto_spinor(bas, nbas);
double *dmcondll = opt->dm_cond + nbas*nbas*LL;
double *dmcondss = opt->dm_cond + nbas*nbas*SS;
double *dmcondsl = opt->dm_cond + nbas*nbas*SL;
//double *dmcondls = opt->dm_cond + nbas*nbas*LS;
double *pdmscond = opt->dm_cond + nbas*nbas*4;
double *dmscondll = pdmscond + nset*nbas*nbas*LL;
double *dmscondss = pdmscond + nset*nbas*nbas*SS;
double *dmscondsl = pdmscond + nset*nbas*nbas*SL;
//double *dmscondls = dmscond + nset*nbas*nbas*LS;
double complex *dmll = dm + n2c*n2c*LL*nset;
double complex *dmss = dm + n2c*n2c*SS*nset;
double complex *dmsl = dm + n2c*n2c*SL*nset;
//double complex *dmls = dm + n2c*n2c*LS*nset;
set_dmcond(dmcondll, dmscondll, dmll,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
set_dmcond(dmcondss, dmscondss, dmss,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
set_dmcond(dmcondsl, dmscondsl, dmsl,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
|
4718.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
// printf("Initializing Array\n");
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
{
A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
#pragma omp parallel for private(j) collapse(2) schedule(static, 8) num_threads(2)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernal computation complete !!\n");
}
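/* A serial reference of the same 3x3 stencil is handy for validating the
   OpenMP kernel above (kernel_conv2d_ref is a hypothetical helper, not part
   of the PolyBench suite): */
static
void kernel_conv2d_ref(int ni, int nj,
                       DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                       DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
  for (i = 1; i < _PB_NI - 1; ++i)
    for (j = 1; j < _PB_NJ - 1; ++j)
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}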
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
vector.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_Vector class.
*
*****************************************************************************/
#include "seq_mv.h"
#include "_hypre_utilities.hpp" //RL: TODO vector_device.c, include cuda there
/*--------------------------------------------------------------------------
* hypre_SeqVectorCreate
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCreate( HYPRE_Int size )
{
hypre_Vector *vector;
vector = hypre_CTAlloc(hypre_Vector, 1, HYPRE_MEMORY_HOST);
hypre_VectorData(vector) = NULL;
hypre_VectorSize(vector) = size;
hypre_VectorNumVectors(vector) = 1;
hypre_VectorMultiVecStorageMethod(vector) = 0;
/* set defaults */
hypre_VectorOwnsData(vector) = 1;
hypre_VectorMemoryLocation(vector) = hypre_HandleMemoryLocation(hypre_handle());
return vector;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultiVectorCreate
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqMultiVectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
hypre_Vector *vector = hypre_SeqVectorCreate(size);
hypre_VectorNumVectors(vector) = num_vectors;
return vector;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorDestroy( hypre_Vector *vector )
{
HYPRE_Int ierr=0;
if (vector)
{
HYPRE_MemoryLocation memory_location = hypre_VectorMemoryLocation(vector);
if ( hypre_VectorOwnsData(vector) )
{
hypre_TFree(hypre_VectorData(vector), memory_location);
}
hypre_TFree(vector, HYPRE_MEMORY_HOST);
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorInitialize
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorInitialize_v2( hypre_Vector *vector, HYPRE_MemoryLocation memory_location )
{
HYPRE_Int size = hypre_VectorSize(vector);
HYPRE_Int ierr = 0;
HYPRE_Int num_vectors = hypre_VectorNumVectors(vector);
HYPRE_Int multivec_storage_method = hypre_VectorMultiVecStorageMethod(vector);
hypre_VectorMemoryLocation(vector) = memory_location;
/* Caveat: for pre-existing data, the memory location must be guaranteed
* to be consistent with `memory_location'
* Otherwise, mismatches will exist and problems will be encountered
* when being used, and freed */
if ( !hypre_VectorData(vector) )
{
hypre_VectorData(vector) = hypre_CTAlloc(HYPRE_Complex, num_vectors*size, memory_location);
}
if ( multivec_storage_method == 0 )
{
hypre_VectorVectorStride(vector) = size;
hypre_VectorIndexStride(vector) = 1;
}
else if ( multivec_storage_method == 1 )
{
hypre_VectorVectorStride(vector) = 1;
hypre_VectorIndexStride(vector) = num_vectors;
}
else
{
++ierr;
}
return ierr;
}
HYPRE_Int
hypre_SeqVectorInitialize( hypre_Vector *vector )
{
HYPRE_Int ierr;
ierr = hypre_SeqVectorInitialize_v2( vector, hypre_VectorMemoryLocation(vector) );
return ierr;
}
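/* Typical lifecycle of a sequential vector (usage sketch on host memory,
 * illustration only; names are placeholders):
 *
 *    hypre_Vector *v = hypre_SeqVectorCreate(n);
 *    hypre_VectorMemoryLocation(v) = HYPRE_MEMORY_HOST;
 *    hypre_SeqVectorInitialize(v);              // allocates num_vectors*size
 *    hypre_SeqVectorSetConstantValues(v, 1.0);
 *    hypre_SeqVectorDestroy(v);                 // frees the data iff owned
 */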
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetDataOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorSetDataOwner( hypre_Vector *vector,
HYPRE_Int owns_data )
{
HYPRE_Int ierr=0;
hypre_VectorOwnsData(vector) = owns_data;
return ierr;
}
/*--------------------------------------------------------------------------
* ReadVector
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorRead( char *file_name )
{
hypre_Vector *vector;
FILE *fp;
HYPRE_Complex *data;
HYPRE_Int size;
HYPRE_Int j;
/*----------------------------------------------------------
* Read in the data
*----------------------------------------------------------*/
fp = fopen(file_name, "r");
hypre_fscanf(fp, "%d", &size);
vector = hypre_SeqVectorCreate(size);
hypre_VectorMemoryLocation(vector) = HYPRE_MEMORY_HOST;
hypre_SeqVectorInitialize(vector);
data = hypre_VectorData(vector);
for (j = 0; j < size; j++)
{
hypre_fscanf(fp, "%le", &data[j]);
}
fclose(fp);
/* multivector code not written yet */
hypre_assert( hypre_VectorNumVectors(vector) == 1 );
return vector;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorPrint
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorPrint( hypre_Vector *vector,
char *file_name )
{
FILE *fp;
HYPRE_Complex *data;
HYPRE_Int size, num_vectors, vecstride, idxstride;
HYPRE_Int i, j;
HYPRE_Complex value;
HYPRE_Int ierr = 0;
num_vectors = hypre_VectorNumVectors(vector);
vecstride = hypre_VectorVectorStride(vector);
idxstride = hypre_VectorIndexStride(vector);
/*----------------------------------------------------------
* Print in the data
*----------------------------------------------------------*/
data = hypre_VectorData(vector);
size = hypre_VectorSize(vector);
fp = fopen(file_name, "w");
if ( hypre_VectorNumVectors(vector) == 1 )
{
hypre_fprintf(fp, "%d\n", size);
}
else
{
hypre_fprintf(fp, "%d vectors of size %d\n", num_vectors, size );
}
if ( num_vectors>1 )
{
for ( j=0; j<num_vectors; ++j )
{
hypre_fprintf(fp, "vector %d\n", j );
for (i = 0; i < size; i++)
{
value = data[ j*vecstride + i*idxstride ];
#ifdef HYPRE_COMPLEX
hypre_fprintf(fp, "%.14e , %.14e\n",
hypre_creal(value), hypre_cimag(value));
#else
hypre_fprintf(fp, "%.14e\n", value);
#endif
}
}
}
else
{
for (i = 0; i < size; i++)
{
#ifdef HYPRE_COMPLEX
hypre_fprintf(fp, "%.14e , %.14e\n",
hypre_creal(data[i]), hypre_cimag(data[i]));
#else
hypre_fprintf(fp, "%.14e\n", data[i]);
#endif
}
}
fclose(fp);
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetConstantValues
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorSetConstantValues( hypre_Vector *v,
HYPRE_Complex value )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *vector_data = hypre_VectorData(v);
HYPRE_Int size = hypre_VectorSize(v);
HYPRE_Int ierr = 0;
size *= hypre_VectorNumVectors(v);
//hypre_SeqVectorPrefetch(v, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
HYPRE_THRUST_CALL( fill_n, vector_data, size, value );
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(vector_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
vector_data[i] = value;
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetRandomValues
*
 * Sets the vector entries to values randomly distributed between -1.0 and +1.0
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorSetRandomValues( hypre_Vector *v,
HYPRE_Int seed )
{
HYPRE_Complex *vector_data = hypre_VectorData(v);
HYPRE_Int size = hypre_VectorSize(v);
HYPRE_Int i;
HYPRE_Int ierr = 0;
hypre_SeedRand(seed);
size *= hypre_VectorNumVectors(v);
if (hypre_GetActualMemLocation(hypre_VectorMemoryLocation(v)) == hypre_MEMORY_HOST)
{
/* RDF: threading this loop may cause problems because of hypre_Rand() */
for (i = 0; i < size; i++)
{
vector_data[i] = 2.0 * hypre_Rand() - 1.0;
}
}
else
{
HYPRE_Complex *h_data = hypre_TAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
for (i = 0; i < size; i++)
{
h_data[i] = 2.0 * hypre_Rand() - 1.0;
}
hypre_TMemcpy(vector_data, h_data, HYPRE_Complex, size, hypre_VectorMemoryLocation(v), HYPRE_MEMORY_HOST);
hypre_TFree(h_data, HYPRE_MEMORY_HOST);
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCopy
 * Copies data from x to y.
 * If x is larger than y, only the first size(y) elements of x are copied.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorCopy( hypre_Vector *x,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Int ierr = 0;
size_t size = hypre_min( hypre_VectorSize(x), hypre_VectorSize(y) ) * hypre_VectorNumVectors(x);
hypre_TMemcpy( hypre_VectorData(y),
hypre_VectorData(x),
HYPRE_Complex,
size,
hypre_VectorMemoryLocation(y),
hypre_VectorMemoryLocation(x) );
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
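/* Note: the copy length is min(size(x), size(y)) * num_vectors(x), and
 * hypre_TMemcpy dispatches on both memory locations, so x and y may live in
 * different memory spaces.  Usage sketch (placeholder names, illustration
 * only):
 *
 *    hypre_SeqVectorCopy(x_host, y_device);   // host -> device transfer
 */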
/*--------------------------------------------------------------------------
* hypre_SeqVectorCloneDeep
* Returns a complete copy of x - a deep copy, with its own copy of the data.
*--------------------------------------------------------------------------*/
hypre_Vector*
hypre_SeqVectorCloneDeep_v2( hypre_Vector *x, HYPRE_MemoryLocation memory_location )
{
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
hypre_Vector *y = hypre_SeqMultiVectorCreate( size, num_vectors );
hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);
hypre_SeqVectorInitialize_v2(y, memory_location);
hypre_SeqVectorCopy( x, y );
return y;
}
hypre_Vector*
hypre_SeqVectorCloneDeep( hypre_Vector *x )
{
return hypre_SeqVectorCloneDeep_v2(x, hypre_VectorMemoryLocation(x));
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCloneShallow
* Returns a complete copy of x - a shallow copy, pointing the data of x
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCloneShallow( hypre_Vector *x )
{
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors );
hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);
hypre_VectorMemoryLocation(y) = hypre_VectorMemoryLocation(x);
hypre_VectorData(y) = hypre_VectorData(x);
hypre_SeqVectorSetDataOwner( y, 0 );
hypre_SeqVectorInitialize(y);
return y;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorScale
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorScale( HYPRE_Complex alpha,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(y);
HYPRE_Int ierr = 0;
size *= hypre_VectorNumVectors(y);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_CUBLAS)
HYPRE_CUBLAS_CALL( cublasDscal(hypre_HandleCublasHandle(hypre_handle()), size, &alpha, y_data, 1) );
#else
HYPRE_THRUST_CALL( transform, y_data, y_data + size, y_data, alpha * _1 );
#endif
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] *= alpha;
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorAxpy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorAxpy( HYPRE_Complex alpha,
hypre_Vector *x,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int ierr = 0;
size *= hypre_VectorNumVectors(x);
//hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_CUBLAS)
HYPRE_CUBLAS_CALL( cublasDaxpy(hypre_HandleCublasHandle(hypre_handle()), size, &alpha, x_data, 1, y_data, 1) );
#else
HYPRE_THRUST_CALL( transform, x_data, x_data + size, y_data, y_data, alpha * _1 + _2 );
#endif
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha * x_data[i];
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorInnerProd
*--------------------------------------------------------------------------*/
HYPRE_Real
hypre_SeqVectorInnerProd( hypre_Vector *x,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Real result = 0.0;
size *= hypre_VectorNumVectors(x);
//hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#ifndef HYPRE_COMPLEX
#if defined(HYPRE_USING_CUBLAS)
HYPRE_CUBLAS_CALL( cublasDdot(hypre_HandleCublasHandle(hypre_handle()), size, x_data, 1, y_data, 1, &result) );
#else
result = HYPRE_THRUST_CALL( inner_product, x_data, x_data + size, y_data, 0.0 );
#endif
#else
/* TODO */
#error "Complex inner product"
#endif
#else /* #if defined(HYPRE_USING_CUDA) */
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) reduction(+:result) is_device_ptr(y_data,x_data) map(result)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
result += hypre_conj(y_data[i]) * x_data[i];
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return result;
}
//TODO
/*--------------------------------------------------------------------------
* hypre_VectorSumElts:
* Returns the sum of all vector elements.
*--------------------------------------------------------------------------*/
HYPRE_Complex hypre_SeqVectorSumElts( hypre_Vector *vector )
{
HYPRE_Complex sum = 0;
HYPRE_Complex *data = hypre_VectorData( vector );
HYPRE_Int size = hypre_VectorSize( vector );
HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
for ( i=0; i<size; ++i ) sum += data[i];
return sum;
}
HYPRE_Int
hypre_SeqVectorPrefetch( hypre_Vector *x, HYPRE_MemoryLocation memory_location)
{
HYPRE_Int ierr = 0;
#ifdef HYPRE_USING_UNIFIED_MEMORY
if (hypre_VectorMemoryLocation(x) != HYPRE_MEMORY_DEVICE)
{
      /* hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! CUDA Prefetch with non-unified memory\n");*/
return 1;
}
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Int size = hypre_VectorSize(x) * hypre_VectorNumVectors(x);
if (size == 0)
{
return ierr;
}
hypre_MemPrefetch(x_data, sizeof(HYPRE_Complex)*size, memory_location);
#endif
return ierr;
}
#if 0
/* y[i] = max(alpha*x[i], beta*y[i]) */
HYPRE_Int
hypre_SeqVectorMax( HYPRE_Complex alpha,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int ierr = 0;
size *= hypre_VectorNumVectors(x);
//hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
thrust::maximum<HYPRE_Complex> mx;
#if defined(HYPRE_USING_CUDA)
HYPRE_THRUST_CALL( transform,
thrust::make_transform_iterator(x_data, alpha * _1),
thrust::make_transform_iterator(x_data + size, alpha * _1),
thrust::make_transform_iterator(y_data, beta * _1),
y_data,
mx );
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
      y_data[i] = hypre_max(alpha * x_data[i], beta * y_data[i]);
}
#endif /* defined(HYPRE_USING_CUDA) */
hypre_SyncCudaComputeStream(hypre_handle());
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
#endif
|
convolution_1x1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void conv1x1s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
int q = 0;
for (; q+3<inch; q+=4)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* img1 = bottom_blob.channel(q+1);
const float* img2 = bottom_blob.channel(q+2);
const float* img3 = bottom_blob.channel(q+3);
const float* kernel0 = kernel + p*inch + q;
const float k0 = kernel0[0];
const float k1 = kernel0[1];
const float k2 = kernel0[2];
const float k3 = kernel0[3];
const float* r0 = img0;
const float* r1 = img1;
const float* r2 = img2;
const float* r3 = img3;
int size = outw * outh;
#if __ARM_NEON
int nn = size >> 3;
int remain = size & 7;
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _k0 = vdupq_n_f32(k0);
float32x4_t _k1 = vdupq_n_f32(k1);
float32x4_t _k2 = vdupq_n_f32(k2);
float32x4_t _k3 = vdupq_n_f32(k3);
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _p = vld1q_f32(r0);
float32x4_t _pn = vld1q_f32(r0+4);
float32x4_t _outp = vld1q_f32(outptr);
float32x4_t _outpn = vld1q_f32(outptr+4);
_outp = vfmaq_f32(_outp, _p, _k0);
_outpn = vfmaq_f32(_outpn, _pn, _k0);
float32x4_t _p1 = vld1q_f32(r1);
float32x4_t _p1n = vld1q_f32(r1+4);
_outp = vfmaq_f32(_outp, _p1, _k1);
_outpn = vfmaq_f32(_outpn, _p1n, _k1);
float32x4_t _p2 = vld1q_f32(r2);
float32x4_t _p2n = vld1q_f32(r2+4);
_outp = vfmaq_f32(_outp, _p2, _k2);
_outpn = vfmaq_f32(_outpn, _p2n, _k2);
float32x4_t _p3 = vld1q_f32(r3);
float32x4_t _p3n = vld1q_f32(r3+4);
_outp = vfmaq_f32(_outp, _p3, _k3);
_outpn = vfmaq_f32(_outpn, _p3n, _k3);
vst1q_f32(outptr, _outp);
vst1q_f32(outptr+4, _outpn);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
outptr += 8;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2 :128]! \n"
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128] \n"
"vmla.f32 q0, q2, %q12 \n"
"vmla.f32 q1, q3, %q12 \n"
"pld [%3, #256] \n"
"vld1.f32 {d4-d7}, [%3 :128]! \n"
"vmla.f32 q0, q2, %q13 \n"
"vmla.f32 q1, q3, %q13 \n"
"pld [%4, #256] \n"
"vld1.f32 {d4-d7}, [%4 :128]! \n"
"vmla.f32 q0, q2, %q14 \n"
"vmla.f32 q1, q3, %q14 \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q0, q2, %q15 \n"
"vmla.f32 q1, q3, %q15 \n"
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2 :128]! \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k0), // %12
"w"(_k1), // %13
"w"(_k2), // %14
"w"(_k3) // %15
: "cc", "memory", "q0", "q1", "q2", "q3"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = *r0 * k0;
float sum1 = *r1 * k1;
float sum2 = *r2 * k2;
float sum3 = *r3 * k3;
*outptr += sum + sum1 + sum2 + sum3;
r0++;
r1++;
r2++;
r3++;
outptr++;
}
}
for (; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch + q;
const float k0 = kernel0[0];
const float* r0 = img0;
int size = outw * outh;
#if __ARM_NEON
int nn = size >> 3;
int remain = size & 7;
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _k0 = vdupq_n_f32(k0);
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _p = vld1q_f32(r0);
float32x4_t _outp = vld1q_f32(outptr);
float32x4_t _pn = vld1q_f32(r0+4);
float32x4_t _outpn = vld1q_f32(outptr+4);
_outp = vfmaq_f32(_outp, _p, _k0);
_outpn = vfmaq_f32(_outpn, _pn, _k0);
vst1q_f32(outptr, _outp);
vst1q_f32(outptr+4, _outpn);
r0 += 8;
outptr += 8;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2 :128]! \n"
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128] \n"
"vmla.f32 q0, q2, %q6 \n"
"vmla.f32 q1, q3, %q6 \n"
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2 :128]! \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0) // %2
: "0"(nn),
"1"(outptr),
"2"(r0),
"w"(_k0) // %6
: "cc", "memory", "q0", "q1", "q2", "q3"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = *r0 * k0;
*outptr += sum;
r0++;
outptr++;
}
}
}
}
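// A plain scalar reference of the same computation helps validate the NEON
// paths above: a stride-1 1x1 convolution is, per output channel, a weighted
// sum over input channels at the same pixel.  (conv1x1s1_ref_sketch is a
// hypothetical helper, not part of ncnn.)
static void conv1x1s1_ref_sketch(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int inch = bottom_blob.c;
    int outch = top_blob.c;
    int size = top_blob.w * top_blob.h;
    const float* kernel = _kernel;
    const float* bias = _bias;
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);
        float* outptr = out;
        for (int i=0; i<size; i++)
        {
            float sum = bias ? bias[p] : 0.f;
            for (int q=0; q<inch; q++)
            {
                const float* img = bottom_blob.channel(q);
                sum += img[i] * kernel[p*inch + q];
            }
            outptr[i] = sum;
        }
    }
}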
static void conv1x1s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
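    // after one output row, r0 has advanced 2*outw floats into input row 2*i;
    // tailstep = (w - 2*outw) + w skips the rest of that row plus the next
    // (unused) row, landing at the start of input row 2*(i+1)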
const int tailstep = w - 2*outw + w;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
int q = 0;
for (; q+3<inch; q+=4)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* img1 = bottom_blob.channel(q+1);
const float* img2 = bottom_blob.channel(q+2);
const float* img3 = bottom_blob.channel(q+3);
const float* kernel0 = kernel + p*inch + q;
const float k0 = kernel0[0];
const float k1 = kernel0[1];
const float k2 = kernel0[2];
const float k3 = kernel0[3];
const float* r0 = img0;
const float* r1 = img1;
const float* r2 = img2;
const float* r3 = img3;
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _k0 = vdupq_n_f32(k0);
float32x4_t _k1 = vdupq_n_f32(k1);
float32x4_t _k2 = vdupq_n_f32(k2);
float32x4_t _k3 = vdupq_n_f32(k3);
#if __aarch64__
for (; nn>0; nn--)
{
float32x4x2_t _px2 = vld2q_f32(r0);
float32x4_t _p = _px2.val[0];
float32x4_t _outp = vld1q_f32(outptr);
float32x4x2_t _pnx2 = vld2q_f32(r0+8);
float32x4_t _pn = _pnx2.val[0];
float32x4_t _outpn = vld1q_f32(outptr+4);
_outp = vmlaq_f32(_outp, _p, _k0);
_outpn = vmlaq_f32(_outpn, _pn, _k0);
float32x4x2_t _p1x2 = vld2q_f32(r1);
float32x4_t _p1 = _p1x2.val[0];
float32x4x2_t _p1nx2 = vld2q_f32(r1+8);
float32x4_t _p1n = _p1nx2.val[0];
_outp = vmlaq_f32(_outp, _p1, _k1);
_outpn = vmlaq_f32(_outpn, _p1n, _k1);
float32x4x2_t _p2x2 = vld2q_f32(r2);
float32x4_t _p2 = _p2x2.val[0];
float32x4x2_t _p2nx2 = vld2q_f32(r2+8);
float32x4_t _p2n = _p2nx2.val[0];
_outp = vmlaq_f32(_outp, _p2, _k2);
_outpn = vmlaq_f32(_outpn, _p2n, _k2);
float32x4x2_t _p3x2 = vld2q_f32(r3);
float32x4_t _p3 = _p3x2.val[0];
float32x4x2_t _p3nx2 = vld2q_f32(r3+8);
float32x4_t _p3n = _p3nx2.val[0];
_outp = vmlaq_f32(_outp, _p3, _k3);
_outpn = vmlaq_f32(_outpn, _p3n, _k3);
vst1q_f32(outptr, _outp);
vst1q_f32(outptr+4, _outpn);
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
outptr += 8;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #512] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"vld2.f32 {d16-d19}, [%2]! \n"
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1] \n"
"vmla.f32 q0, q2, %q12 \n"
"vmla.f32 q1, q8, %q12 \n"
"pld [%3, #512] \n"
"vld2.f32 {d4-d7}, [%3]! \n"
"vld2.f32 {d16-d19}, [%3]! \n"
"vmla.f32 q0, q2, %q13 \n"
"vmla.f32 q1, q8, %q13 \n"
"pld [%4, #512] \n"
"vld2.f32 {d4-d7}, [%4]! \n"
"vld2.f32 {d16-d19}, [%4]! \n"
"vmla.f32 q0, q2, %q14 \n"
"vmla.f32 q1, q8, %q14 \n"
"pld [%5, #512] \n"
"vld2.f32 {d4-d7}, [%5]! \n"
"vld2.f32 {d16-d19}, [%5]! \n"
"vmla.f32 q0, q2, %q15 \n"
"vmla.f32 q1, q8, %q15 \n"
"pld [%2, #512] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"vld2.f32 {d16-d19}, [%2]! \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d3}, [%1]! \n"
"bne 0b \n"
"sub %2, #64 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k0), // %12
"w"(_k1), // %13
"w"(_k2), // %14
"w"(_k3) // %15
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = *r0 * k0;
float sum1 = *r1 * k1;
float sum2 = *r2 * k2;
float sum3 = *r3 * k3;
*outptr += sum + sum1 + sum2 + sum3;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
}
}
for (; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch + q;
const float k0 = kernel0[0];
const float* r0 = img0;
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _k0 = vdupq_n_f32(k0);
#if __aarch64__
for (; nn>0; nn--)
{
float32x4x2_t _px2 = vld2q_f32(r0);
float32x4_t _p = _px2.val[0];
float32x4_t _outp = vld1q_f32(outptr);
float32x4x2_t _pnx2 = vld2q_f32(r0+8);
float32x4_t _pn = _pnx2.val[0];
float32x4_t _outpn = vld1q_f32(outptr+4);
_outp = vmlaq_f32(_outp, _p, _k0);
_outpn = vmlaq_f32(_outpn, _pn, _k0);
vst1q_f32(outptr, _outp);
vst1q_f32(outptr+4, _outpn);
r0 += 16;
outptr += 8;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #512] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"vld2.f32 {d16-d19}, [%2]! \n"
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1] \n"
"vmla.f32 q0, q2, %q6 \n"
"vmla.f32 q1, q8, %q6 \n"
"pld [%2, #512] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"vld2.f32 {d16-d19}, [%2]! \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d3}, [%1]! \n"
"bne 0b \n"
"sub %2, #64 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0) // %2
: "0"(nn),
"1"(outptr),
"2"(r0),
"w"(_k0) // %6
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = *r0 * k0;
*outptr += sum;
r0 += 2;
outptr++;
}
r0 += tailstep;
}
}
}
}
|
GB_unop__acos_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__acos_fp64_fp64)
// op(A') function: GB (_unop_tran__acos_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = acos (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = acos (x) ;
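// (acos is defined for inputs in [-1,1]; per C99 semantics, values outside
// that domain yield NaN, which propagates into Cx)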
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = acos (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOS || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__acos_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = acos (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = acos (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__acos_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_bitmap_select_template.c | //------------------------------------------------------------------------------
// GB_bitmap_select_template: C=select(A,thunk) if A is bitmap or full
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Ab and Cb can be aliased, if A is bitmap and the selection is done in-place.
// Ax and Cx are not aliased.
// TODO: If done in-place, Cx can be passed as NULL. Then if A is not bitmap,
// C->b needs to be allocated, but not C->x.
// TODO: use a single GB_memcpy for the values, regardless of selectop
// the following macro is awkward but currently needed for the user_select op:
#undef GBI
#define GBI(Ai,p,avlen) i
{
int8_t *Ab = A->b ;
GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
const int64_t avlen = A->vlen ;
const int64_t avdim = A->vdim ;
const size_t asize = A->type->size ;
const int64_t anz = avlen * avdim ;
int64_t pA, cnvals = 0 ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(+:cnvals)
for (pA = 0 ; pA < anz ; pA++)
{
int64_t i = pA % avlen ;
int64_t j = pA / avlen ;
#if defined ( GB_ENTRY_SELECTOR )
// test the existence and value of A(i,j)
int8_t cb = GBB (Ab, pA) && GB_TEST_VALUE_OF_ENTRY (pA) ;
#else
// test the existence and position of A(i,j)
#if defined ( GB_TRIL_SELECTOR )
int8_t cb = GBB (Ab, pA) && (j-i <= ithunk) ;
#elif defined ( GB_TRIU_SELECTOR )
int8_t cb = GBB (Ab, pA) && (j-i >= ithunk) ;
#elif defined ( GB_DIAG_SELECTOR )
int8_t cb = GBB (Ab, pA) && (j-i == ithunk) ;
#elif defined ( GB_OFFDIAG_SELECTOR )
int8_t cb = GBB (Ab, pA) && (j-i != ithunk) ;
#else
ASSERT (GB_DEAD_CODE) ;
#endif
#endif
Cb [pA] = cb ;
cnvals += cb ;
{
// Cx [pA] = Ax [pA]
GB_SELECT_ENTRY (Cx, pA, Ax, pA) ;
}
}
(*cnvals_handle) = cnvals ;
}
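// For reference, with ithunk = 0 the positional selectors above keep:
//   TRIL:    j-i <= 0  (lower triangle, diagonal included)
//   TRIU:    j-i >= 0  (upper triangle, diagonal included)
//   DIAG:    j-i == 0  (diagonal only)
//   OFFDIAG: j-i != 0  (everything off the diagonal)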
|
residual_based_bossak_velocity_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
// Suneth Warnakulasuriya
//
#if !defined(KRATOS_RESIDUAL_BASED_BOSSAK_VELOCITY_SCHEME_H_INCLUDED)
#define KRATOS_RESIDUAL_BASED_BOSSAK_VELOCITY_SCHEME_H_INCLUDED
// System includes
#include <limits>
#include <vector>
// External includes
// Project includes
#include "custom_strategies/relaxed_dof_updater.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/schemes/scheme.h"
#include "utilities/time_discretization.h"
namespace Kratos
{
///@name Kratos Classes
///@{
/// A scheme for steady and dynamic equations, using Bossak time integration.
/**
* It can be used for either first- or second-order time derivatives. Elements
* and conditions must provide a specialization of SchemeExtension via
* their data value container, which allows the scheme to operate independently
* of the variable arrangements in the element or condition.
*/
template <class TSparseSpace, class TDenseSpace>
class ResidualBasedBossakVelocityScheme : public Scheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedBossakVelocityScheme);
using BaseType = Scheme<TSparseSpace, TDenseSpace>;
using SystemMatrixType = typename BaseType::TSystemMatrixType;
using SystemVectorType = typename BaseType::TSystemVectorType;
using LocalSystemVectorType = typename BaseType::LocalSystemVectorType;
using LocalSystemMatrixType = typename BaseType::LocalSystemMatrixType;
using DofsArrayType = typename BaseType::DofsArrayType;
using NodeType = ModelPart::NodeType;
using IndexType = std::size_t;
///@}
///@name Life Cycle
///@{
/// Constructor.
ResidualBasedBossakVelocityScheme(
const double AlphaBossak,
const double RelaxationFactor,
const std::vector<Variable<double> const*> rDisplacementVariables,
const std::vector<Variable<double> const*> rVelocityVariables,
const std::vector<Variable<double> const*> rAccelerationVariables,
const std::vector<VariableComponent<VectorComponentAdaptor<array_1d<double, 3>>> const*> rDisplacementComponentVariables,
const std::vector<VariableComponent<VectorComponentAdaptor<array_1d<double, 3>>> const*> rVelocityComponentVariables,
const std::vector<VariableComponent<VectorComponentAdaptor<array_1d<double, 3>>> const*> rAccelerationComponentVariables)
: mAlphaBossak(AlphaBossak),
mUpdateAcceleration(rAccelerationVariables.size() > 0 ||
rAccelerationComponentVariables.size() > 0),
mUpdateDisplacement(rDisplacementVariables.size() > 0 ||
rDisplacementComponentVariables.size() > 0),
mRelaxationFactor(RelaxationFactor),
mDisplacementVariables(rDisplacementVariables),
mVelocityVariables(rVelocityVariables),
mAccelerationVariables(rAccelerationVariables),
mDisplacementComponentVariables(rDisplacementComponentVariables),
mVelocityComponentVariables(rVelocityComponentVariables),
mAccelerationComponentVariables(rAccelerationComponentVariables)
{
KRATOS_INFO("ResidualBasedBossakVelocityScheme")
<< " Using bossak velocity scheme with alpha_bossak = " << std::scientific
<< mAlphaBossak << " [UpdateAcceleration: " << mUpdateAcceleration
<< ", UpdateDisplacement: " << mUpdateDisplacement << "]\n";
// Allocate auxiliary memory.
const int num_threads = OpenMPUtils::GetNumThreads();
mMassMatrix.resize(num_threads);
mDampingMatrix.resize(num_threads);
mValuesVector.resize(num_threads);
mSecondDerivativeValuesVector.resize(num_threads);
mSecondDerivativeValuesVectorOld.resize(num_threads);
}
/// Destructor.
~ResidualBasedBossakVelocityScheme() override = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
void InitializeSolutionStep(ModelPart& rModelPart,
SystemMatrixType& rA,
SystemVectorType& rDx,
SystemVectorType& rb) override
{
KRATOS_TRY;
BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
const double delta_time = rModelPart.GetProcessInfo()[DELTA_TIME];
KRATOS_ERROR_IF(delta_time < std::numeric_limits<double>::epsilon())
<< "detected delta_time = 0 in the Bossak Scheme ... "
"check if the time step is created correctly for "
"the current model part.";
ResidualBasedBossakVelocityScheme::CalculateBossakConstants(
mBossak, mAlphaBossak, delta_time);
#pragma omp critical
{
rModelPart.GetProcessInfo()[BOSSAK_ALPHA] = mBossak.Alpha;
}
KRATOS_CATCH("");
}
void Update(ModelPart& rModelPart,
DofsArrayType& rDofSet,
SystemMatrixType& rA,
SystemVectorType& rDx,
SystemVectorType& rb) override
{
KRATOS_TRY;
mpDofUpdater->UpdateDofs(rDofSet, rDx, mRelaxationFactor);
this->UpdateTimeSchemeVariables(rModelPart);
KRATOS_CATCH("");
}
void CalculateSystemContributions(Element::Pointer pCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
const int k = OpenMPUtils::ThisThread();
(pCurrentElement)->InitializeNonLinearIteration(rCurrentProcessInfo);
(pCurrentElement)->CalculateLocalSystem(rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
(pCurrentElement)->CalculateLocalVelocityContribution(mDampingMatrix[k], rRHS_Contribution, rCurrentProcessInfo);
if (mUpdateAcceleration)
{
(pCurrentElement)->CalculateMassMatrix(mMassMatrix[k], rCurrentProcessInfo);
AddDynamicsToRHS(pCurrentElement, rRHS_Contribution, mDampingMatrix[k],
mMassMatrix[k], rCurrentProcessInfo);
}
AddDynamicsToLHS(rLHS_Contribution, mDampingMatrix[k], mMassMatrix[k],
rCurrentProcessInfo);
(pCurrentElement)->EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("");
}
void Calculate_RHS_Contribution(Element::Pointer pCurrentElement,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
const int k = OpenMPUtils::ThisThread();
// Initializing the non linear iteration for the current element
(pCurrentElement)->InitializeNonLinearIteration(rCurrentProcessInfo);
// basic operations for the element considered
(pCurrentElement)->CalculateRightHandSide(rRHS_Contribution, rCurrentProcessInfo);
(pCurrentElement)->CalculateLocalVelocityContribution(mDampingMatrix[k], rRHS_Contribution, rCurrentProcessInfo);
(pCurrentElement)->EquationIdVector(rEquationId, rCurrentProcessInfo);
// adding the dynamic contributions (static is already included)
if (mUpdateAcceleration)
{
(pCurrentElement)->CalculateMassMatrix(mMassMatrix[k], rCurrentProcessInfo);
AddDynamicsToRHS(pCurrentElement, rRHS_Contribution, mDampingMatrix[k],
mMassMatrix[k], rCurrentProcessInfo);
}
}
void Condition_CalculateSystemContributions(Condition::Pointer pCurrentCondition,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Condition::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY
const int k = OpenMPUtils::ThisThread();
(pCurrentCondition)->InitializeNonLinearIteration(rCurrentProcessInfo);
(pCurrentCondition)->CalculateLocalSystem(rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
(pCurrentCondition)->CalculateLocalVelocityContribution(mDampingMatrix[k], rRHS_Contribution, rCurrentProcessInfo);
(pCurrentCondition)->EquationIdVector(rEquationId, rCurrentProcessInfo);
if (mUpdateAcceleration)
{
(pCurrentCondition)->CalculateMassMatrix(mMassMatrix[k], rCurrentProcessInfo);
AddDynamicsToRHS(pCurrentCondition, rRHS_Contribution,
mDampingMatrix[k], mMassMatrix[k], rCurrentProcessInfo);
}
AddDynamicsToLHS(rLHS_Contribution, mDampingMatrix[k], mMassMatrix[k],
rCurrentProcessInfo);
KRATOS_CATCH("")
}
void Condition_Calculate_RHS_Contribution(Condition::Pointer pCurrentCondition,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
const int k = OpenMPUtils::ThisThread();
(pCurrentCondition)->InitializeNonLinearIteration(rCurrentProcessInfo);
(pCurrentCondition)->CalculateRightHandSide(rRHS_Contribution, rCurrentProcessInfo);
(pCurrentCondition)->CalculateLocalVelocityContribution(mDampingMatrix[k], rRHS_Contribution, rCurrentProcessInfo);
(pCurrentCondition)->EquationIdVector(rEquationId, rCurrentProcessInfo);
// adding the dynamic contributions (static is already included)
if (mUpdateAcceleration)
{
(pCurrentCondition)->CalculateMassMatrix(mMassMatrix[k], rCurrentProcessInfo);
AddDynamicsToRHS(pCurrentCondition, rRHS_Contribution,
mDampingMatrix[k], mMassMatrix[k], rCurrentProcessInfo);
}
KRATOS_CATCH("");
}
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedBossakVelocityScheme";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
struct BossakConstants
{
double Alpha;
double Gamma;
double Beta;
double C0;
double C1;
double C2;
double C3;
double C4;
double C5;
double C6;
};
///@}
///@name Protected member Variables
///@{
std::vector<LocalSystemVectorType> mSecondDerivativeValuesVectorOld;
std::vector<LocalSystemVectorType> mSecondDerivativeValuesVector;
std::vector<LocalSystemVectorType> mValuesVector;
std::vector<LocalSystemMatrixType> mMassMatrix;
std::vector<LocalSystemMatrixType> mDampingMatrix;
const double mAlphaBossak;
bool mUpdateAcceleration;
bool mUpdateDisplacement;
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
//****************************************************************************
/**
Kdyn = am*M + D + a1*K
*/
void AddDynamicsToLHS(LocalSystemMatrixType& rLHS_Contribution,
LocalSystemMatrixType& rDampingMatrix,
LocalSystemMatrixType& rMassMatrix,
ProcessInfo& CurrentProcessInfo)
{
        // multiplying by the time scheme factor
rLHS_Contribution *= mBossak.C1;
// adding mass contribution to the dynamic stiffness
if (rMassMatrix.size1() != 0 && mUpdateAcceleration) // if M matrix declared
{
noalias(rLHS_Contribution) += mBossak.C0 * rMassMatrix;
}
// adding damping contribution
        if (rDampingMatrix.size1() != 0) // if D matrix declared
{
noalias(rLHS_Contribution) += rDampingMatrix;
}
}
//****************************************************************************
/// Add Bossak contributions from the inertial term to the RHS vector.
/** This essentially performs bdyn = b - M*acc for the current element.
* Note that viscous/pressure contributions to the RHS are expected to be added by the element itself.
* @param[in] rCurrentElement The fluid element we are assembling.
* @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added.
* @param[in] rD The elemental velocity/pressure LHS matrix.
* @param[in] rM The elemental acceleration LHS matrix.
* @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart.
*/
void AddDynamicsToRHS(Element::Pointer rCurrentElement,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rDampingMatrix,
LocalSystemMatrixType& rMassMatrix,
ProcessInfo& rCurrentProcessInfo)
{
// adding inertia contribution
if (rMassMatrix.size1() != 0)
{
const int k = OpenMPUtils::ThisThread();
rCurrentElement->GetSecondDerivativesVector(
mSecondDerivativeValuesVector[k], 0);
(mSecondDerivativeValuesVector[k]) *= (1.00 - mBossak.Alpha);
rCurrentElement->GetSecondDerivativesVector(
mSecondDerivativeValuesVectorOld[k], 1);
noalias(mSecondDerivativeValuesVector[k]) +=
mBossak.Alpha * mSecondDerivativeValuesVectorOld[k];
noalias(rRHS_Contribution) -=
prod(rMassMatrix, mSecondDerivativeValuesVector[k]);
}
}
/// Add Bossak contributions from the inertial term to the RHS vector.
/** This essentially performs bdyn = b - M*acc for the current condition.
 * Note that viscous/pressure contributions to the RHS are expected to be added by the condition itself.
* @param[in] rCurrentCondition The fluid condition we are assembling.
* @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added.
* @param[in] rD The elemental velocity/pressure LHS matrix.
* @param[in] rM The elemental acceleration LHS matrix.
* @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart.
*/
void AddDynamicsToRHS(Condition::Pointer rCurrentCondition,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rDampingMatrix,
LocalSystemMatrixType& rMassMatrix,
ProcessInfo& rCurrentProcessInfo)
{
// adding inertia contribution
if (rMassMatrix.size1() != 0)
{
const int k = OpenMPUtils::ThisThread();
rCurrentCondition->GetSecondDerivativesVector(
mSecondDerivativeValuesVector[k], 0);
(mSecondDerivativeValuesVector[k]) *= (1.00 - mBossak.Alpha);
rCurrentCondition->GetSecondDerivativesVector(
mSecondDerivativeValuesVectorOld[k], 1);
noalias(mSecondDerivativeValuesVector[k]) +=
mBossak.Alpha * mSecondDerivativeValuesVectorOld[k];
noalias(rRHS_Contribution) -=
prod(rMassMatrix, mSecondDerivativeValuesVector[k]);
}
}
void UpdateTimeSchemeVariables(ModelPart& rModelPart)
{
KRATOS_TRY;
UpdateAcceleration<Variable<double>>(rModelPart, mVelocityVariables,
mAccelerationVariables);
UpdateAcceleration<VariableComponent<VectorComponentAdaptor<array_1d<double, 3>>>>(
rModelPart, mVelocityComponentVariables, mAccelerationComponentVariables);
UpdateDisplacement<Variable<double>>(rModelPart, mDisplacementVariables,
mVelocityVariables, mAccelerationVariables);
UpdateDisplacement<VariableComponent<VectorComponentAdaptor<array_1d<double, 3>>>>(
rModelPart, mDisplacementComponentVariables,
mVelocityComponentVariables, mAccelerationComponentVariables);
KRATOS_CATCH("");
}
void UpdateAcceleration(double& rCurrentAcceleration,
const double CurrentVelocity,
const double OldVelocity,
const double OldAcceleration) const
{
rCurrentAcceleration = mBossak.C2 * (CurrentVelocity - OldVelocity) -
mBossak.C3 * OldAcceleration;
}
void UpdateDisplacement(double& rCurrentDisplacement,
const double OldDisplacement,
const double OldVelocity,
const double CurrentAcceleration,
const double OldAcceleration) const
{
rCurrentDisplacement = OldDisplacement + mBossak.C6 * OldVelocity +
mBossak.C4 * OldAcceleration + mBossak.C5 * CurrentAcceleration;
}
static void CalculateBossakConstants(BossakConstants& rBossakConstants,
const double Alpha,
const double DeltaTime)
{
TimeDiscretization::Bossak bossak(Alpha, 0.25, 0.5);
rBossakConstants.Alpha = bossak.GetAlphaM();
rBossakConstants.Gamma = bossak.GetGamma();
rBossakConstants.Beta = bossak.GetBeta();
rBossakConstants.C0 =
(1.0 - rBossakConstants.Alpha) / (rBossakConstants.Gamma * DeltaTime);
rBossakConstants.C1 =
DeltaTime / (rBossakConstants.Beta * rBossakConstants.Gamma);
rBossakConstants.C2 = 1.0 / (rBossakConstants.Gamma * DeltaTime);
rBossakConstants.C3 = (1.0 - rBossakConstants.Gamma) / rBossakConstants.Gamma;
rBossakConstants.C4 =
std::pow(DeltaTime, 2) * (-2.0 * rBossakConstants.Beta + 1.0) / 2.0;
rBossakConstants.C5 = std::pow(DeltaTime, 2) * rBossakConstants.Beta;
rBossakConstants.C6 = DeltaTime;
}
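// For reference, with gamma = 0.5 and beta = 0.25 as hard-coded above, these
// constants realize the velocity-form Newmark/Bossak updates applied in
// UpdateAcceleration and UpdateDisplacement:
//     a_{n+1} = (v_{n+1} - v_n) / (gamma * dt) - ((1 - gamma) / gamma) * a_n
//             =  C2 * (v_{n+1} - v_n)          -   C3 * a_n
//     d_{n+1} = d_n + dt * v_n + (dt^2 / 2) * ((1 - 2*beta) * a_n + 2*beta * a_{n+1})
//             = d_n + C6 * v_n +  C4 * a_n    +   C5 * a_{n+1}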
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
using DofUpdaterType = RelaxedDofUpdater<TSparseSpace>;
using DofUpdaterPointerType = typename DofUpdaterType::UniquePointer;
DofUpdaterPointerType mpDofUpdater = Kratos::make_unique<DofUpdaterType>();
double mRelaxationFactor;
const std::vector<Variable<double> const*> mDisplacementVariables;
const std::vector<Variable<double> const*> mVelocityVariables;
const std::vector<Variable<double> const*> mAccelerationVariables;
const std::vector<VariableComponent<VectorComponentAdaptor<array_1d<double, 3>>> const*> mDisplacementComponentVariables;
const std::vector<VariableComponent<VectorComponentAdaptor<array_1d<double, 3>>> const*> mVelocityComponentVariables;
const std::vector<VariableComponent<VectorComponentAdaptor<array_1d<double, 3>>> const*> mAccelerationComponentVariables;
BossakConstants mBossak;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// Updates the acceleration of each target variable from the Bossak velocity/acceleration relation.
template <class TVariableType>
void UpdateAcceleration(ModelPart& rModelPart,
const std::vector<TVariableType const*>& pVelocityVariables,
const std::vector<TVariableType const*>& pAccelerationVariables)
{
if (!mUpdateAcceleration)
return;
const int number_of_nodes = rModelPart.NumberOfNodes();
#pragma omp parallel for
for (int i_node = 0; i_node < number_of_nodes; ++i_node)
{
NodeType& r_node = *(rModelPart.NodesBegin() + i_node);
for (IndexType i_var = 0; i_var < pAccelerationVariables.size(); ++i_var)
{
double& r_current_acceleration =
r_node.FastGetSolutionStepValue(*pAccelerationVariables[i_var]);
const double old_acceleration = r_node.FastGetSolutionStepValue(
*pAccelerationVariables[i_var], 1);
const double current_velocity =
r_node.FastGetSolutionStepValue(*pVelocityVariables[i_var]);
const double old_velocity =
r_node.FastGetSolutionStepValue(*pVelocityVariables[i_var], 1);
UpdateAcceleration(r_current_acceleration, current_velocity,
old_velocity, old_acceleration);
}
}
}
template <class TVariableType>
void UpdateDisplacement(ModelPart& rModelPart,
const std::vector<TVariableType const*>& pDisplacementVariables,
const std::vector<TVariableType const*>& pVelocityVariables,
const std::vector<TVariableType const*>& pAccelerationVariables)
{
if (!mUpdateDisplacement)
return;
const int number_of_nodes = rModelPart.NumberOfNodes();
#pragma omp parallel for
for (int i_node = 0; i_node < number_of_nodes; ++i_node)
{
NodeType& r_node = *(rModelPart.NodesBegin() + i_node);
for (IndexType i_var = 0; i_var < pDisplacementVariables.size(); ++i_var)
{
double& r_current_displacement =
r_node.FastGetSolutionStepValue(*pDisplacementVariables[i_var]);
const double old_displacement = r_node.FastGetSolutionStepValue(
*pDisplacementVariables[i_var], 1);
const double current_acceleration =
r_node.FastGetSolutionStepValue(*pAccelerationVariables[i_var]);
const double old_acceleration = r_node.FastGetSolutionStepValue(
*pAccelerationVariables[i_var], 1);
const double old_velocity =
r_node.FastGetSolutionStepValue(*pVelocityVariables[i_var], 1);
UpdateDisplacement(r_current_displacement, old_displacement, old_velocity,
current_acceleration, old_acceleration);
}
}
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; /* Class ResidualBasedBossakVelocityScheme */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BOSSAK_VELOCITY_SCHEME_H_INCLUDED defined */
|
convolutiondepthwise_5x5.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g*25;
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(g);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0+4);
float32x4_t _k891011 = vld1q_f32(kernel0+8);
float32x4_t _k12131415 = vld1q_f32(kernel0+12);
float32x4_t _k16171819 = vld1q_f32(kernel0+16);
float32x4_t _k20212223 = vld1q_f32(kernel0+20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
float32x4_t _bias0 = vdupq_n_f32(bias0);
#endif // __ARM_NEON
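// The kernel rows are kept resident in NEON registers: _k0123 .. _k20212223
// each hold four consecutive weights, so the fifth weight of every 5-wide row
// straddles two registers (e.g. k0[4] is lane 0 of _k4567), and _k24242424
// broadcasts the final weight kernel0[24]. The scalar tails below regroup the
// per-row fifth weights with vsetq_lane_f32 instead.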
int i = 0;
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
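// nn counts full vector iterations (8 outputs each on aarch64, 4 on armv7);
// remain is the scalar tail handled after the assembly blocks.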
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
// r1
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n"// v16 v17 v18 = r10 r14 r18
"mov v8.16b, %25.16b \n"// v8 = _bias0
"mov v9.16b, %25.16b \n"// v9 = _bias0
"0: \n"
"mov v10.16b, %25.16b \n"// v10 = _bias0
"mov v11.16b, %25.16b \n"// v11 = _bias0
"fmla v8.4s, v16.4s, %19.s[1] \n"
"fmla v10.4s, v16.4s, %18.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r11
"fmla v9.4s, v17.4s, %19.s[1] \n"
"fmla v11.4s, v17.4s, %18.s[0] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r15
"fmla v8.4s, v17.4s, %20.s[1] \n"
"fmla v10.4s, v17.4s, %19.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r12
"fmla v9.4s, v18.4s, %20.s[1] \n"
"fmla v11.4s, v18.4s, %19.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r16
"fmla v8.4s, v19.4s, %19.s[2] \n"
"fmla v10.4s, v19.4s, %18.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r13
"fmla v9.4s, v20.4s, %19.s[2] \n"
"fmla v11.4s, v20.4s, %18.s[1] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r17
"fmla v8.4s, v21.4s, %19.s[3] \n"
"fmla v10.4s, v21.4s, %18.s[2] \n"
"add %4, %4, #32 \n"
"fmla v9.4s, v22.4s, %19.s[3] \n"
"fmla v11.4s, v22.4s, %18.s[2] \n"
// r2
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n"// v12 v13 v14 = r20 r24 r28
"fmla v8.4s, v19.4s, %20.s[0] \n"
"fmla v10.4s, v19.4s, %18.s[3] \n"
"fmla v9.4s, v20.4s, %20.s[0] \n"
"fmla v11.4s, v20.4s, %18.s[3] \n"
"add %5, %5, #32 \n"
"fmla v8.4s, v12.4s, %20.s[2] \n"
"fmla v10.4s, v12.4s, %19.s[1] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r21
"fmla v9.4s, v13.4s, %20.s[2] \n"
"fmla v11.4s, v13.4s, %19.s[1] \n"
"ext v22.16b, v13.16b, v14.16b, #4 \n"// r25
"fmla v8.4s, v13.4s, %21.s[2] \n"
"fmla v10.4s, v13.4s, %20.s[1] \n"
"ext v19.16b, v12.16b, v13.16b, #8 \n"// r22
"fmla v9.4s, v14.4s, %21.s[2] \n"
"fmla v11.4s, v14.4s, %20.s[1] \n"
"ext v20.16b, v13.16b, v14.16b, #8 \n"// r26
"fmla v8.4s, v21.4s, %20.s[3] \n"
"fmla v10.4s, v21.4s, %19.s[2] \n"
"ext v21.16b, v12.16b, v13.16b, #12 \n"// r23
"fmla v9.4s, v22.4s, %20.s[3] \n"
"fmla v11.4s, v22.4s, %19.s[2] \n"
"ext v22.16b, v13.16b, v14.16b, #12 \n"// r27
"fmla v8.4s, v19.4s, %21.s[0] \n"
"fmla v10.4s, v19.4s, %19.s[3] \n"
"fmla v9.4s, v20.4s, %21.s[0] \n"
"fmla v11.4s, v20.4s, %19.s[3] \n"
// r3
"prfm pldl1keep, [%6, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n"// v16 v17 v18 = r30 r34 r38
"fmla v8.4s, v21.4s, %21.s[1] \n"
"fmla v10.4s, v21.4s, %20.s[0] \n"
"fmla v9.4s, v22.4s, %21.s[1] \n"
"fmla v11.4s, v22.4s, %20.s[0] \n"
"add %6, %6, #32 \n"
"fmla v8.4s, v16.4s, %21.s[3] \n"
"fmla v10.4s, v16.4s, %20.s[2] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r31
"fmla v9.4s, v17.4s, %21.s[3] \n"
"fmla v11.4s, v17.4s, %20.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r35
"fmla v8.4s, v17.4s, %22.s[3] \n"
"fmla v10.4s, v17.4s, %21.s[2] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r32
"fmla v9.4s, v18.4s, %22.s[3] \n"
"fmla v11.4s, v18.4s, %21.s[2] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r36
"fmla v8.4s, v19.4s, %22.s[0] \n"
"fmla v10.4s, v19.4s, %20.s[3] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r33
"fmla v9.4s, v20.4s, %22.s[0] \n"
"fmla v11.4s, v20.4s, %20.s[3] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r37
"fmla v8.4s, v21.4s, %22.s[1] \n"
"fmla v10.4s, v21.4s, %21.s[0] \n"
"fmla v9.4s, v22.4s, %22.s[1] \n"
"fmla v11.4s, v22.4s, %21.s[0] \n"
// r4
"prfm pldl1keep, [%7, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%7] \n"// v12 v13 v14 = r40 r44 r48
"fmla v8.4s, v19.4s, %22.s[2] \n"
"fmla v10.4s, v19.4s, %21.s[1] \n"
"add %7, %7, #32 \n"
"fmla v9.4s, v20.4s, %22.s[2] \n"
"fmla v11.4s, v20.4s, %21.s[1] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r41
"fmla v8.4s, v12.4s, %23.s[0] \n"
"fmla v10.4s, v12.4s, %21.s[3] \n"
"ext v22.16b, v13.16b, v14.16b, #4 \n"// r45
"fmla v9.4s, v13.4s, %23.s[0] \n"
"fmla v11.4s, v13.4s, %21.s[3] \n"
"ext v19.16b, v12.16b, v13.16b, #8 \n"// r42
"fmla v8.4s, v13.4s, %24.s[0] \n"
"fmla v10.4s, v13.4s, %22.s[3] \n"
"ext v20.16b, v13.16b, v14.16b, #8 \n"// r46
"fmla v9.4s, v14.4s, %24.s[0] \n"
"fmla v11.4s, v14.4s, %22.s[3] \n"
// r0 and r5
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%3] \n"// v16 v17 v18 = r00 r04 r08
"fmla v8.4s, v21.4s, %23.s[1] \n"
"fmla v10.4s, v21.4s, %22.s[0] \n"
"ext v21.16b, v12.16b, v13.16b, #12 \n"// r43
"fmla v9.4s, v22.4s, %23.s[1] \n"
"fmla v11.4s, v22.4s, %22.s[0] \n"
"ext v22.16b, v13.16b, v14.16b, #12 \n"// r47
"fmla v8.4s, v19.4s, %23.s[2] \n"
"fmla v10.4s, v19.4s, %22.s[1] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%8] \n"// v12 v13 v14 = r50 r54 r58
"fmla v9.4s, v20.4s, %23.s[2] \n"
"fmla v11.4s, v20.4s, %22.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r01
"fmla v8.4s, v21.4s, %23.s[3] \n"
"fmla v10.4s, v21.4s, %22.s[2] \n"
"ext v23.16b, v12.16b, v13.16b, #4 \n"// r51
"fmla v9.4s, v22.4s, %23.s[3] \n"
"fmla v11.4s, v22.4s, %22.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r05
"fmla v8.4s, v16.4s, %18.s[0] \n"
"fmla v10.4s, v12.4s, %23.s[0] \n"
"ext v24.16b, v13.16b, v14.16b, #4 \n"// r55
"fmla v9.4s, v17.4s, %18.s[0] \n"
"fmla v11.4s, v13.4s, %23.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r02
"fmla v8.4s, v17.4s, %19.s[0] \n"
"fmla v10.4s, v13.4s, %24.s[0] \n"
"ext v25.16b, v12.16b, v13.16b, #8 \n"// r52
"fmla v9.4s, v18.4s, %19.s[0] \n"
"fmla v11.4s, v14.4s, %24.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r06
"fmla v8.4s, v19.4s, %18.s[1] \n"
"fmla v10.4s, v23.4s, %23.s[1] \n"
"ext v26.16b, v13.16b, v14.16b, #8 \n"// r56
"fmla v9.4s, v20.4s, %18.s[1] \n"
"fmla v11.4s, v24.4s, %23.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r03
"fmla v8.4s, v21.4s, %18.s[2] \n"
"fmla v10.4s, v25.4s, %23.s[2] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n"// r53
"fmla v9.4s, v22.4s, %18.s[2] \n"
"fmla v11.4s, v26.4s, %23.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r07
"fmla v8.4s, v19.4s, %18.s[3] \n"
"fmla v10.4s, v23.4s, %23.s[3] \n"
"ext v24.16b, v13.16b, v14.16b, #12 \n"// r57
"fmla v9.4s, v20.4s, %18.s[3] \n"
"add %3, %3, #32 \n"
"fmla v11.4s, v24.4s, %23.s[3] \n"
"add %8, %8, #32 \n"
// r1
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n"// v16 v17 v18 = r10 r14 r18
"subs %w0, %w0, #1 \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"mov v8.16b, %25.16b \n"// v8 = _bias0
"mov v9.16b, %25.16b \n"// v9 = _bias0
"st1 {v10.4s, v11.4s}, [%2], #32 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424), // %24
"w"(_bias0) // %25
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26"
);
}
if (remain >= 4)
{
remain -= 4;
asm volatile(
// r1
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v12.4s, v13.4s}, [%3] \n"// v12 v13 = r10 r14
"mov v8.16b, %23.16b \n"// v8 = _bias0
"mov v9.16b, %23.16b \n"// v9 = _bias0
"fmul v10.4s, v12.4s, %17.s[1] \n"
"fmul v11.4s, v12.4s, %16.s[0] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r11
"fmla v8.4s, v13.4s, %18.s[1] \n"
"fmla v9.4s, v13.4s, %17.s[0] \n"
"ext v22.16b, v12.16b, v13.16b, #8 \n"// r12
"fmla v10.4s, v21.4s, %17.s[2] \n"
"fmla v11.4s, v21.4s, %16.s[1] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n"// r13
"fmla v8.4s, v22.4s, %17.s[3] \n"
"fmla v9.4s, v22.4s, %16.s[2] \n"
// r2
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.4s, v17.4s}, [%4] \n"// v16 v17 = r20 r24
"fmla v10.4s, v23.4s, %18.s[0] \n"
"fmla v11.4s, v23.4s, %16.s[3] \n"
"add %4, %4, #16 \n"
"fmla v8.4s, v16.4s, %18.s[2] \n"
"fmla v9.4s, v16.4s, %17.s[1] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r21
"fmla v10.4s, v17.4s, %19.s[2] \n"
"fmla v11.4s, v17.4s, %18.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r22
"fmla v8.4s, v18.4s, %18.s[3] \n"
"fmla v9.4s, v18.4s, %17.s[2] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r23
"fmla v10.4s, v19.4s, %19.s[0] \n"
"fmla v11.4s, v19.4s, %17.s[3] \n"
// r3
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v12.4s, v13.4s}, [%5] \n"// v12 v13 = r30 r34
"fmla v8.4s, v20.4s, %19.s[1] \n"
"fmla v9.4s, v20.4s, %18.s[0] \n"
"add %5, %5, #16 \n"
"fmla v10.4s, v12.4s, %19.s[3] \n"
"fmla v11.4s, v12.4s, %18.s[2] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r31
"fmla v8.4s, v13.4s, %20.s[3] \n"
"fmla v9.4s, v13.4s, %19.s[2] \n"
"ext v22.16b, v12.16b, v13.16b, #8 \n"// r32
"fmla v10.4s, v21.4s, %20.s[0] \n"
"fmla v11.4s, v21.4s, %18.s[3] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n"// r33
"fmla v8.4s, v22.4s, %20.s[1] \n"
"fmla v9.4s, v22.4s, %19.s[0] \n"
// r4
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4s, v17.4s}, [%6] \n"// v16 v17 = r40 r44
"fmla v10.4s, v23.4s, %20.s[2] \n"
"fmla v11.4s, v23.4s, %19.s[1] \n"
"add %6, %6, #16 \n"
"fmla v8.4s, v16.4s, %21.s[0] \n"
"fmla v9.4s, v16.4s, %19.s[3] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r41
"fmla v10.4s, v17.4s, %22.s[0] \n"
"fmla v11.4s, v17.4s, %20.s[3] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r42
"fmla v8.4s, v18.4s, %21.s[1] \n"
"fmla v9.4s, v18.4s, %20.s[0] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r43
"fmla v10.4s, v19.4s, %21.s[2] \n"
"fmla v11.4s, v19.4s, %20.s[1] \n"
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4s, v17.4s}, [%2] \n"// v16 v17 = r00 r04
"fmla v8.4s, v20.4s, %21.s[3] \n"
"fmla v9.4s, v20.4s, %20.s[2] \n"
// r5
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v12.4s, v13.4s}, [%7] \n"// v12 v13 = r50 r54
"fmla v10.4s, v16.4s, %16.s[0] \n"
"fmla v11.4s, v12.4s, %21.s[0] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r01
"fmla v8.4s, v17.4s, %17.s[0] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r51
"fmla v9.4s, v13.4s, %22.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r02
"fmla v10.4s, v18.4s, %16.s[1] \n"
"ext v22.16b, v12.16b, v13.16b, #8 \n"// r52
"fmla v11.4s, v21.4s, %21.s[1] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r03
"fmla v8.4s, v19.4s, %16.s[2] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n"// r53
"fmla v9.4s, v22.4s, %21.s[2] \n"
"add %3, %3, #16 \n"
"fmla v10.4s, v20.4s, %16.s[3] \n"
"fmla v11.4s, v23.4s, %21.s[3] \n"
"add %2, %2, #16 \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v9.4s, v9.4s, v11.4s \n"
"add %7, %7, #16 \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
: "=r"(outptr), // %0
"=r"(outptr2), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5) // %7
: "0"(outptr),
"1"(outptr2),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"w"(_k0123), // %16
"w"(_k4567), // %17
"w"(_k891011), // %18
"w"(_k12131415), // %19
"w"(_k16171819), // %20
"w"(_k20212223), // %21
"w"(_k24242424), // %22
"w"(_bias0) // %23
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
#else
if (nn > 0)
{
asm volatile(
// r1
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4] \n"// q14 q15 = r10 r14
"vmov q8, %q25 \n"// q8 = _bias0
"0: \n"
"vmov q9, %q25 \n"// q9 = _bias0
"vmla.f32 q8, q14, %e19[1] \n"
"vmla.f32 q9, q14, %e18[0] \n"
"vext.32 q12, q14, q15, #1 \n"// r11
"vmla.f32 q8, q15, %e20[1] \n"
"vmla.f32 q9, q15, %e19[0] \n"
"vext.32 q13, q14, q15, #2 \n"// r12
"vmla.f32 q8, q12, %f19[0] \n"
"vmla.f32 q9, q12, %e18[1] \n"
"vext.32 q12, q14, q15, #3 \n"// r13
"vmla.f32 q8, q13, %f19[1] \n"
"vmla.f32 q9, q13, %f18[0] \n"
// r2
"pld [%5, #256] \n"
"vld1.f32 {d20-d23}, [%5] \n"// q10 q11 = r20 r24
"vmla.f32 q8, q12, %e20[0] \n"
"vmla.f32 q9, q12, %f18[1] \n"
"add %5, #16 \n"
"vmla.f32 q8, q10, %f20[0] \n"
"vmla.f32 q9, q10, %e19[1] \n"
"vext.32 q12, q10, q11, #1 \n"// r21
"vmla.f32 q8, q11, %f21[0] \n"
"vmla.f32 q9, q11, %e20[1] \n"
"vext.32 q13, q10, q11, #2 \n"// r22
"vmla.f32 q8, q12, %f20[1] \n"
"vmla.f32 q9, q12, %f19[0] \n"
"vext.32 q12, q10, q11, #3 \n"// r23
"vmla.f32 q8, q13, %e21[0] \n"
"vmla.f32 q9, q13, %f19[1] \n"
// r3
"pld [%6, #256] \n"
"vld1.f32 {d28-d31}, [%6] \n"// q14 q15 = r30 r34
"vmla.f32 q8, q12, %e21[1] \n"
"vmla.f32 q9, q12, %e20[0] \n"
"add %6, #16 \n"
"vmla.f32 q8, q14, %f21[1] \n"
"vmla.f32 q9, q14, %f20[0] \n"
"vext.32 q12, q14, q15, #1 \n"// r31
"vmla.f32 q8, q15, %f22[1] \n"
"vmla.f32 q9, q15, %f21[0] \n"
"vext.32 q13, q14, q15, #2 \n"// r32
"vmla.f32 q8, q12, %e22[0] \n"
"vmla.f32 q9, q12, %f20[1] \n"
"vext.32 q12, q14, q15, #3 \n"// r33
"vmla.f32 q8, q13, %e22[1] \n"
"vmla.f32 q9, q13, %e21[0] \n"
// r4
"pld [%7, #256] \n"
"vld1.f32 {d20-d23}, [%7] \n"// q10 q11 = r40 r44
"vmla.f32 q8, q12, %f22[0] \n"
"vmla.f32 q9, q12, %e21[1] \n"
"add %7, #16 \n"
"vmla.f32 q8, q10, %e23[0] \n"
"vmla.f32 q9, q10, %f21[1] \n"
"vext.32 q12, q10, q11, #1 \n"// r41
"vmla.f32 q8, q11, %e24[0] \n"
"vmla.f32 q9, q11, %f22[1] \n"
"vext.32 q13, q10, q11, #2 \n"// r42
"vmla.f32 q8, q12, %e23[1] \n"
"vmla.f32 q9, q12, %e22[0] \n"
"vext.32 q12, q10, q11, #3 \n"// r43
"vmla.f32 q8, q13, %f23[0] \n"
"vmla.f32 q9, q13, %e22[1] \n"
// r0 and r5
"pld [%3, #256] \n"
"vld1.f32 {d20-d23}, [%3] \n"// q10 q11 = r00 r04
"vmla.f32 q8, q12, %f23[1] \n"
"vmla.f32 q9, q12, %f22[0] \n"
// r5
"pld [%8, #256] \n"
"vld1.f32 {d28-d31}, [%8] \n"// q14 q15 = r50 r54
"vmla.f32 q8, q10, %e18[0] \n"
"vmla.f32 q9, q14, %e23[0] \n"
"vext.32 q12, q10, q11, #1 \n"// r01
"vmla.f32 q8, q11, %e19[0] \n"
"vmla.f32 q9, q15, %e24[0] \n"
"vext.32 q13, q14, q15, #1 \n"// r51
"vmla.f32 q8, q12, %e18[1] \n"
"vext.32 q12, q10, q11, #2 \n"// r02
"vmla.f32 q9, q13, %e23[1] \n"
"vext.32 q13, q14, q15, #2 \n"// r52
"vmla.f32 q8, q12, %f18[0] \n"
"vext.32 q12, q10, q11, #3 \n"// r03
"vmla.f32 q9, q13, %f23[0] \n"
"vext.32 q13, q14, q15, #3 \n"// r33
"vmla.f32 q8, q12, %f18[1] \n"
"add %3, #16 \n"
"vmla.f32 q9, q13, %f23[1] \n"
"add %4, #16 \n"
// r1
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4] \n"// q14 q15 = r10 r14
"add %8, #16 \n"
"vst1.f32 {d16-d17}, [%1]! \n"
"vmov q8, %q25 \n"// q8 = _bias0
"subs %0, #1 \n"
"vst1.f32 {d18-d19}, [%2]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424), // %24
"w"(_bias0) // %25
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = bias0;
float sum2 = bias0;
#if __ARM_NEON
// TODO neon assembly optimize
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _sum = vmulq_f32(_r1, _k1);
float32x4_t _sum2 = vmulq_f32(_r1, _k0123);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _k2 = vld1q_f32(k2);
_sum = vmlaq_f32(_sum, _r2, _k2);
_sum2 = vmlaq_f32(_sum2, _r2, _k1);
float32x4_t _r3 = vld1q_f32(r3);
float32x4_t _k3 = vld1q_f32(k3);
_sum = vmlaq_f32(_sum, _r3, _k3);
_sum2 = vmlaq_f32(_sum2, _r3, _k2);
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
_sum2 = vmlaq_f32(_sum2, _r4, _k3);
float32x4_t _r0 = vld1q_f32(r0);
_sum = vmlaq_f32(_sum, _r0, _k0123);
float32x4_t _r5 = vld1q_f32(r5);
_sum2 = vmlaq_f32(_sum2, _r5, _k20212223);
float32x4_t _k_t4 = vdupq_n_f32(0.f); // zero-init: vsetq_lane_f32 on an uninitialized vector is undefined
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4 = vdupq_n_f32(0.f); // zero-init for the same reason
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum += r4[4] * k4[4];
_r_t4 = vextq_f32(_r_t4, _r_t4, 1);
_r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3);
_sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4);
sum2 += r5[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2);
sum += vget_lane_f32(_ss_ss2, 0);
sum2 += vget_lane_f32(_ss_ss2, 1);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r1[3] * k0[3];
sum2 += r1[4] * k0[4];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r2[3] * k1[3];
sum2 += r2[4] * k1[4];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
sum2 += r3[3] * k2[3];
sum2 += r3[4] * k2[4];
sum2 += r4[0] * k3[0];
sum2 += r4[1] * k3[1];
sum2 += r4[2] * k3[2];
sum2 += r4[3] * k3[3];
sum2 += r4[4] * k3[4];
sum2 += r5[0] * k4[0];
sum2 += r5[1] * k4[1];
sum2 += r5[2] * k4[2];
sum2 += r5[3] * k4[3];
sum2 += r5[4] * k4[4];
#endif // __ARM_NEON
*outptr = sum;
*outptr2 = sum2;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
outptr++;
outptr2++;
}
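// Two output rows were produced this iteration, so step every input row
// pointer past the 4-column kernel overlap (outw + 4 == w) plus one full
// extra row, and skip each output pointer over the row the other one wrote.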
r0 += 4 + w;
r1 += 4 + w;
r2 += 4 + w;
r3 += 4 + w;
r4 += 4 + w;
r5 += 4 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
// v10 v11
// r0
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n"// v16 v17 v18 = r00 r04 r08
"mov v8.16b, %21.16b \n"// v8 = _bias0
"mov v9.16b, %21.16b \n"// v9 = _bias0
"0: \n"
"fmul v10.4s, v16.4s, %14.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r01
"fmul v11.4s, v17.4s, %14.s[0] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r05
"fmla v8.4s, v17.4s, %15.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r02
"fmla v9.4s, v18.4s, %15.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r06
"fmla v10.4s, v19.4s, %14.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r03
"fmla v11.4s, v20.4s, %14.s[1] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r07
"fmla v8.4s, v21.4s, %14.s[2] \n"
"fmla v9.4s, v22.4s, %14.s[2] \n"
// r1
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%3] \n"// v12 v13 v14 = r10 r14 r18
"fmla v10.4s, v19.4s, %14.s[3] \n"
"fmla v11.4s, v20.4s, %14.s[3] \n"
"fmla v8.4s, v12.4s, %15.s[1] \n"
"ext v19.16b, v12.16b, v13.16b, #4 \n"// r11
"fmla v9.4s, v13.4s, %15.s[1] \n"
"ext v20.16b, v13.16b, v14.16b, #4 \n"// r15
"fmla v10.4s, v13.4s, %16.s[1] \n"
"ext v21.16b, v12.16b, v13.16b, #8 \n"// r12
"fmla v11.4s, v14.4s, %16.s[1] \n"
"ext v22.16b, v13.16b, v14.16b, #8 \n"// r16
"fmla v8.4s, v19.4s, %15.s[2] \n"
"ext v19.16b, v12.16b, v13.16b, #12 \n"// r13
"fmla v9.4s, v20.4s, %15.s[2] \n"
"ext v20.16b, v13.16b, v14.16b, #12 \n"// r17
"fmla v10.4s, v21.4s, %15.s[3] \n"
"fmla v11.4s, v22.4s, %15.s[3] \n"
// r2
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n"// v16 v17 v18 = r20 r24 r28
"fmla v8.4s, v19.4s, %16.s[0] \n"
"fmla v9.4s, v20.4s, %16.s[0] \n"
"fmla v10.4s, v16.4s, %16.s[2] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r21
"fmla v11.4s, v17.4s, %16.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r25
"fmla v8.4s, v17.4s, %17.s[2] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r22
"fmla v9.4s, v18.4s, %17.s[2] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r26
"fmla v10.4s, v19.4s, %16.s[3] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r23
"fmla v11.4s, v20.4s, %16.s[3] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r27
"fmla v8.4s, v21.4s, %17.s[0] \n"
"fmla v9.4s, v22.4s, %17.s[0] \n"
// r3
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n"// v12 v13 v14 = r30 r34 r38
"fmla v10.4s, v19.4s, %17.s[1] \n"
"fmla v11.4s, v20.4s, %17.s[1] \n"
"fmla v8.4s, v12.4s, %17.s[3] \n"
"ext v19.16b, v12.16b, v13.16b, #4 \n"// r11
"fmla v9.4s, v13.4s, %17.s[3] \n"
"ext v20.16b, v13.16b, v14.16b, #4 \n"// r15
"fmla v10.4s, v13.4s, %18.s[3] \n"
"ext v21.16b, v12.16b, v13.16b, #8 \n"// r12
"fmla v11.4s, v14.4s, %18.s[3] \n"
"ext v22.16b, v13.16b, v14.16b, #8 \n"// r16
"fmla v8.4s, v19.4s, %18.s[0] \n"
"ext v19.16b, v12.16b, v13.16b, #12 \n"// r13
"fmla v9.4s, v20.4s, %18.s[0] \n"
"ext v20.16b, v13.16b, v14.16b, #12 \n"// r17
"fmla v10.4s, v21.4s, %18.s[1] \n"
"fmla v11.4s, v22.4s, %18.s[1] \n"
// r4
"prfm pldl1keep, [%6, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n"// v16 v17 v18 = r40 r44 r48
"fmla v8.4s, v19.4s, %18.s[2] \n"
"fmla v9.4s, v20.4s, %18.s[2] \n"
"fmla v10.4s, v16.4s, %19.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r41
"fmla v11.4s, v17.4s, %19.s[0] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r45
"fmla v8.4s, v17.4s, %20.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r42
"fmla v9.4s, v18.4s, %20.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r46
"fmla v10.4s, v19.4s, %19.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r43
"fmla v11.4s, v20.4s, %19.s[1] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r47
"fmla v8.4s, v21.4s, %19.s[2] \n"
"add %2, %2, #32 \n"
"fmla v9.4s, v22.4s, %19.s[2] \n"
"add %3, %3, #32 \n"
"fmla v10.4s, v19.4s, %19.s[3] \n"
"add %4, %4, #32 \n"
"fmla v11.4s, v20.4s, %19.s[3] \n"
// r0
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n"// v16 v17 v18 = r00 r04 r08
"add %5, %5, #32 \n"
"fadd v10.4s, v8.4s, v10.4s \n"
"add %6, %6, #32 \n"
"fadd v11.4s, v9.4s, v11.4s \n"
"mov v8.16b, %21.16b \n"// v8 = _bias0
"mov v9.16b, %21.16b \n"// v9 = _bias0
"subs %w0, %w0, #1 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22"
);
}
if (remain >= 4)
{
remain -= 4;
asm volatile(
// r0
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v16.4s, v17.4s}, [%1] \n"// v16 v17 = r00 r04
"mov v8.16b, %19.16b \n"// v8 = _bias0
"add %1, %1, #16 \n"
"fmul v9.4s, v16.4s, %12.s[0] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r01
"fmla v8.4s, v17.4s, %13.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r02
"fmla v9.4s, v18.4s, %12.s[1] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r03
"fmla v8.4s, v19.4s, %12.s[2] \n"
// r1
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v10.4s, v11.4s}, [%2] \n"// v10 v11 = r10 r14
"fmla v9.4s, v20.4s, %12.s[3] \n"
"add %2, %2, #16 \n"
"fmla v8.4s, v10.4s, %13.s[1] \n"
"ext v12.16b, v10.16b, v11.16b, #4 \n"// r11
"fmla v9.4s, v11.4s, %14.s[1] \n"
"ext v13.16b, v10.16b, v11.16b, #8 \n"// r12
"fmla v8.4s, v12.4s, %13.s[2] \n"
"ext v14.16b, v10.16b, v11.16b, #12 \n"// r13
"fmla v9.4s, v13.4s, %13.s[3] \n"
// r2
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4s, v17.4s}, [%3] \n"// v16 v17 = r20 r24
"fmla v8.4s, v14.4s, %14.s[0] \n"
"add %3, %3, #16 \n"
"fmla v9.4s, v16.4s, %14.s[2] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r21
"fmla v8.4s, v17.4s, %15.s[2] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r22
"fmla v9.4s, v18.4s, %14.s[3] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r23
"fmla v8.4s, v19.4s, %15.s[0] \n"
// r3
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v10.4s, v11.4s}, [%4] \n"// v10 v11 = r30 r34
"fmla v9.4s, v20.4s, %15.s[1] \n"
"add %4, %4, #16 \n"
"fmla v8.4s, v10.4s, %15.s[3] \n"
"ext v12.16b, v10.16b, v11.16b, #4 \n"// r31
"fmla v9.4s, v11.4s, %16.s[3] \n"
"ext v13.16b, v10.16b, v11.16b, #8 \n"// r32
"fmla v8.4s, v12.4s, %16.s[0] \n"
"ext v14.16b, v10.16b, v11.16b, #12 \n"// r33
"fmla v9.4s, v13.4s, %16.s[1] \n"
// r4
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4s, v17.4s}, [%5] \n"// v16 v17 = r40 r44
"fmla v8.4s, v14.4s, %16.s[2] \n"
"add %5, %5, #16 \n"
"fmla v9.4s, v16.4s, %17.s[0] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r41
"fmla v8.4s, v17.4s, %18.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r42
"fmla v9.4s, v18.4s, %17.s[1] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r43
"fmla v8.4s, v19.4s, %17.s[2] \n"
"fmla v9.4s, v20.4s, %17.s[3] \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k0123), // %12
"w"(_k4567), // %13
"w"(_k891011), // %14
"w"(_k12131415), // %15
"w"(_k16171819), // %16
"w"(_k20212223), // %17
"w"(_k24242424), // %18
"w"(_bias0) // %19
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20"
);
}
#else
if (nn > 0)
{
asm volatile(
// r0
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2] \n"// q10 q11 = r00 r04
"vmov q8, %q21 \n"// q8 = _bias0
"0: \n"
"vmul.f32 q9, q10, %e14[0] \n"
"vext.32 q12, q10, q11, #1 \n"// r01
"vmla.f32 q8, q11, %e15[0] \n"
"vext.32 q13, q10, q11, #2 \n"// r02
"vmla.f32 q9, q12, %e14[1] \n"
"vext.32 q12, q10, q11, #3 \n"// r03
"vmla.f32 q8, q13, %f14[0] \n"
// r1
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3] \n"// q14 q15 = r10 r14
"vmla.f32 q9, q12, %f14[1] \n"
"add %3, #16 \n"
"vmla.f32 q8, q14, %e15[1] \n"
"vext.32 q12, q14, q15, #1 \n"// r11
"vmla.f32 q9, q15, %e16[1] \n"
"vext.32 q13, q14, q15, #2 \n"// r12
"vmla.f32 q8, q12, %f15[0] \n"
"vext.32 q12, q14, q15, #3 \n"// r13
"vmla.f32 q9, q13, %f15[1] \n"
// r2
"pld [%4, #256] \n"
"vld1.f32 {d20-d23}, [%4] \n"// q10 q11 = r20 r24
"vmla.f32 q8, q12, %e16[0] \n"
"add %4, #16 \n"
"vmla.f32 q9, q10, %f16[0] \n"
"vext.32 q12, q10, q11, #1 \n"// r21
"vmla.f32 q8, q11, %f17[0] \n"
"vext.32 q13, q10, q11, #2 \n"// r22
"vmla.f32 q9, q12, %f16[1] \n"
"vext.32 q12, q10, q11, #3 \n"// r23
"vmla.f32 q8, q13, %e17[0] \n"
// r3
"pld [%5, #256] \n"
"vld1.f32 {d28-d31}, [%5] \n"// q14 q15 = r30 r34
"vmla.f32 q9, q12, %e17[1] \n"
"add %5, #16 \n"
"vmla.f32 q8, q14, %f17[1] \n"
"vext.32 q12, q14, q15, #1 \n"// r31
"vmla.f32 q9, q15, %f18[1] \n"
"vext.32 q13, q14, q15, #2 \n"// r32
"vmla.f32 q8, q12, %e18[0] \n"
"vext.32 q12, q14, q15, #3 \n"// r33
"vmla.f32 q9, q13, %e18[1] \n"
// r4
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6] \n"// q10 q11 = r40 r44
"vmla.f32 q8, q12, %f18[0] \n"
"add %6, #16 \n"
"vmla.f32 q9, q10, %e19[0] \n"
"vext.32 q12, q10, q11, #1 \n"// r41
"vmla.f32 q8, q11, %e20[0] \n"
"vext.32 q13, q10, q11, #2 \n"// r42
"vmla.f32 q9, q12, %e19[1] \n"
"vext.32 q12, q10, q11, #3 \n"// r43
"vmla.f32 q8, q13, %f19[0] \n"
"add %2, #16 \n"
"vmla.f32 q9, q12, %f19[1] \n"
// r0
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2] \n"// q10 q11 = r00 r04
"vadd.f32 q9, q9, q8 \n"
"vmov q8, %q21 \n"// q8 = _bias0
"subs %0, #1 \n"
"vst1.f32 {d18-d19}, [%1]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
// TODO neon assembly optimize
float sum = bias0;
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
float32x4_t _k_t4 = vdupq_n_f32(0.f); // zero-init to avoid reading an uninitialized vector
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4 = vdupq_n_f32(0.f); // zero-init for the same reason
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum += r4[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
*outptr = sum;
r0++;
r1++;
r2++;
r3++;
r4++;
outptr++;
#else
// TODO neon assembly optimize
asm volatile(
"veor q14, q14 \n"
"vext.32 q14, %q19, q14, #3 \n"// q14 = bias0 0 0 0
"vld1.f32 {d16-d17}, [%1] \n"// q8 = r00 r01 r02 r03
"vld1.f32 {d18-d19}, [%2] \n"// q9 = r10 r11 r12 r13(X)
"add r4, %1, #16 \n"
"vld1.f32 {d19[1]}, [r4] \n"
"vext.32 q9, q9, q9, #3 \n"// q9 = r04 r10 r11 r12
"vmla.f32 q14, q8, %q12 \n"
"add r4, %2, #12 \n"
"vld1.f32 {d20}, [r4] \n"// d20 = r13 r14
"vld1.f32 {d21}, [%3] \n"// d21 = r20 r21
"vmla.f32 q14, q9, %q13 \n"
"add r4, %3, #8 \n"
"vld1.f32 {d22-d23}, [r4] \n"// q11 = r22 r23 r24 X
"vld1.f32 {d23[1]}, [%4] \n"// q11 = r22 r23 r24 r30
"vmla.f32 q14, q10, %q14 \n"
"add r4, %4, #4 \n"
"vld1.f32 {d24-d25}, [r4] \n"// q12 = r31 r32 r33 r34
"vmla.f32 q14, q11, %q15 \n"
"vld1.f32 {d26-d27}, [%5] \n"// q13 = r40 r41 r42 r43
"vmla.f32 q14, q12, %q16 \n"
"veor d30, d30 \n"
"add r4, %5, #16 \n"
"vld1.f32 {d30[0]}, [r4] \n"// d30 = r44 0
"vmla.f32 q14, q13, %q17 \n"
"vmla.f32 d28, d30, %e18 \n"
"add %1, #4 \n"
// h-sum
"vadd.f32 d28, d28, d29 \n"
"add %2, #4 \n"
"add %3, #4 \n"
"vpadd.f32 d28, d28, d28 \n"
"add %4, #4 \n"
"add %5, #4 \n"
"vst1.f32 {d28[0]}, [%0]! \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k0123), // %12
"w"(_k4567), // %13
"w"(_k891011), // %14
"w"(_k12131415), // %15
"w"(_k16171819), // %16
"w"(_k20212223), // %17
"w"(_k24242424), // %18
"w"(_bias0) // %19
: "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
*outptr = sum;
r0++;
r1++;
r2++;
r3++;
r4++;
outptr++;
#endif
}
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
}
}
}
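// ---------------------------------------------------------------------------
// Reference sketch (not part of ncnn, never called): a plain scalar version of
// the 5x5 stride-1 depthwise convolution that convdw5x5s1_neon optimizes. It
// assumes the valid-padding layout implied above (outw == w - 4) and uses only
// the Mat accessors already used in this file. Marked static inline so it
// compiles cleanly even though nothing references it.
static inline void convdw5x5s1_reference(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    const int w = bottom_blob.w;
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int group = bottom_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);
        const float* img0 = bottom_blob.channel(g);
        const float* kernel0 = kernel + g * 25;
        const float bias0 = bias ? bias[g] : 0.f;
        float* outptr = out;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = bias0;
                // accumulate the 5x5 window anchored at input position (i, j)
                for (int ky = 0; ky < 5; ky++)
                {
                    const float* r = img0 + (i + ky) * w + j;
                    const float* k = kernel0 + ky * 5;
                    for (int kx = 0; kx < 5; kx++)
                        sum += r[kx] * k[kx];
                }
                *outptr++ = sum;
            }
        }
    }
}
// ---------------------------------------------------------------------------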
static void convdw5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
//int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
//int outch = top_blob.c;
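// After finishing a stride-2 output row, 2*outw inputs of the current row were
// consumed; tailstep jumps past its remainder plus one full row, landing on
// input row 2*(i+1).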
const int tailstep = w - 2*outw + w;
const int group = bottom_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g*25;
float* outptr = out;
const float* img0 = bottom_blob.channel(g);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0+4);
float32x4_t _k891011 = vld1q_f32(kernel0+8);
float32x4_t _k12131415 = vld1q_f32(kernel0+12);
float32x4_t _k16171819 = vld1q_f32(kernel0+16);
float32x4_t _k20212223 = vld1q_f32(kernel0+20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
float32x4_t _bias0 = vdupq_n_f32(bias0);
#endif // __ARM_NEON
int i = 0;
// NOTE unroll outh 2 results somewhat speed drop :| (about -4%)
// so we do not implement it here
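// Stride 2 maps directly onto the ld2/vld2 deinterleaving loads used below:
// a single load splits even input columns (r00 r02 r04 r06) and odd columns
// (r01 r03 r05 r07) into separate registers, so consecutive outputs become
// contiguous lanes again.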
for (; i < outh; i++)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v16.4s, v17.4s}, [%2], #32 \n"// v16 v17 = r00 r01
"mov v8.16b, %21.16b \n"// v8 = _bias0
"mov v9.16b, %21.16b \n"// v9 = _bias0
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v18.4s, v19.4s}, [%2], #32 \n"// v18 v19 = r08 r09
"0: \n"
"fmul v10.4s, v16.4s, %14.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v20.4s, v21.4s}, [%2] \n"// v20 v21 = r016 r017
"fmul v11.4s, v18.4s, %14.s[0] \n"
"ext v22.16b, v16.16b, v18.16b, #4 \n"// v22 = r02
"fmla v8.4s, v17.4s, %14.s[1] \n"
"ext v25.16b, v18.16b, v20.16b, #4 \n"// v25 = r010
"fmla v9.4s, v19.4s, %14.s[1] \n"
"ext v23.16b, v17.16b, v19.16b, #4 \n"// v23 = r03
"fmla v10.4s, v22.4s, %14.s[2] \n"
"ext v26.16b, v19.16b, v21.16b, #4 \n"// v26 = r011
"fmla v11.4s, v25.4s, %14.s[2] \n"
"ext v24.16b, v16.16b, v18.16b, #8 \n"// v24 = r04
"fmla v8.4s, v23.4s, %14.s[3] \n"
"ext v27.16b, v18.16b, v20.16b, #8 \n"// v27 = r012
"fmla v9.4s, v26.4s, %14.s[3] \n"
// r1
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v12.4s, v13.4s}, [%3], #32 \n"// v12 v13 = r10 r11
"fmla v10.4s, v24.4s, %15.s[0] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v14.4s, v15.4s}, [%3], #32 \n"// v14 v15 = r18 r19
"fmla v11.4s, v27.4s, %15.s[0] \n"
"fmla v8.4s, v12.4s, %15.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v20.4s, v21.4s}, [%3] \n"// v20 v21 = r116 r117
"fmla v9.4s, v14.4s, %15.s[1] \n"
"ext v22.16b, v12.16b, v14.16b, #4 \n"// v22 = r12
"fmla v10.4s, v13.4s, %15.s[2] \n"
"ext v25.16b, v14.16b, v20.16b, #4 \n"// v25 = r110
"fmla v11.4s, v15.4s, %15.s[2] \n"
"ext v23.16b, v13.16b, v15.16b, #4 \n"// v23 = r13
"fmla v8.4s, v22.4s, %15.s[3] \n"
"ext v26.16b, v15.16b, v21.16b, #4 \n"// v26 = r111
"fmla v9.4s, v25.4s, %15.s[3] \n"
"ext v24.16b, v12.16b, v14.16b, #8 \n"// v24 = r14
"fmla v10.4s, v23.4s, %16.s[0] \n"
"ext v27.16b, v14.16b, v20.16b, #8 \n"// v27 = r112
"fmla v11.4s, v26.4s, %16.s[0] \n"
// r2
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v16.4s, v17.4s}, [%4], #32 \n"// v16 v17 = r20 r21
"fmla v8.4s, v24.4s, %16.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v18.4s, v19.4s}, [%4], #32 \n"// v18 v19 = r28 r29
"fmla v9.4s, v27.4s, %16.s[1] \n"
"fmla v10.4s, v16.4s, %16.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v20.4s, v21.4s}, [%4] \n"// v20 v21 = r216 r217
"fmla v11.4s, v18.4s, %16.s[2] \n"
"ext v22.16b, v16.16b, v18.16b, #4 \n"// v22 = r22
"fmla v8.4s, v17.4s, %16.s[3] \n"
"ext v25.16b, v18.16b, v20.16b, #4 \n"// v25 = r210
"fmla v9.4s, v19.4s, %16.s[3] \n"
"ext v23.16b, v17.16b, v19.16b, #4 \n"// v23 = r23
"fmla v10.4s, v22.4s, %17.s[0] \n"
"ext v26.16b, v19.16b, v21.16b, #4 \n"// v26 = r211
"fmla v11.4s, v25.4s, %17.s[0] \n"
"ext v24.16b, v16.16b, v18.16b, #8 \n"// v24 = r24
"fmla v8.4s, v23.4s, %17.s[1] \n"
"ext v27.16b, v18.16b, v20.16b, #8 \n"// v27 = r212
"fmla v9.4s, v26.4s, %17.s[1] \n"
// r3
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v12.4s, v13.4s}, [%5], #32 \n"// v12 v13 = r30 r31
"fmla v10.4s, v24.4s, %17.s[2] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v14.4s, v15.4s}, [%5], #32 \n"// v14 v15 = r38 r39
"fmla v11.4s, v27.4s, %17.s[2] \n"
"fmla v8.4s, v12.4s, %17.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v20.4s, v21.4s}, [%5] \n"// v20 v21 = r316 r317
"fmla v9.4s, v14.4s, %17.s[3] \n"
"ext v22.16b, v12.16b, v14.16b, #4 \n"// v22 = r32
"fmla v10.4s, v13.4s, %18.s[0] \n"
"ext v25.16b, v14.16b, v20.16b, #4 \n"// v25 = r310
"fmla v11.4s, v15.4s, %18.s[0] \n"
"ext v23.16b, v13.16b, v15.16b, #4 \n"// v23 = r33
"fmla v8.4s, v22.4s, %18.s[1] \n"
"ext v26.16b, v15.16b, v21.16b, #4 \n"// v26 = r311
"fmla v9.4s, v25.4s, %18.s[1] \n"
"ext v24.16b, v12.16b, v14.16b, #8 \n"// v24 = r34
"fmla v10.4s, v23.4s, %18.s[2] \n"
"ext v27.16b, v14.16b, v20.16b, #8 \n"// v27 = r312
"fmla v11.4s, v26.4s, %18.s[2] \n"
// r4
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v16.4s, v17.4s}, [%6], #32 \n"// v16 v17 = r40 r41
"fmla v8.4s, v24.4s, %18.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v18.4s, v19.4s}, [%6], #32 \n"// v18 v19 = r48 r49
"fmla v9.4s, v27.4s, %18.s[3] \n"
"fmla v10.4s, v16.4s, %19.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v20.4s, v21.4s}, [%6] \n"// v20 v21 = r416 r417
"fmla v11.4s, v18.4s, %19.s[0] \n"
"ext v22.16b, v16.16b, v18.16b, #4 \n"// v22 = r42
"fmla v8.4s, v17.4s, %19.s[1] \n"
"ext v25.16b, v18.16b, v20.16b, #4 \n"// v25 = r410
"fmla v9.4s, v19.4s, %19.s[1] \n"
"ext v23.16b, v17.16b, v19.16b, #4 \n"// v23 = r43
"fmla v10.4s, v22.4s, %19.s[2] \n"
"ext v26.16b, v19.16b, v21.16b, #4 \n"// v26 = r411
"fmla v11.4s, v25.4s, %19.s[2] \n"
"ext v24.16b, v16.16b, v18.16b, #8 \n"// v24 = r44
"fmla v8.4s, v23.4s, %19.s[3] \n"
"ext v27.16b, v18.16b, v20.16b, #8 \n"// v27 = r412
"fmla v9.4s, v26.4s, %19.s[3] \n"
"fmla v10.4s, v24.4s, %20.s[0] \n"
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v16.4s, v17.4s}, [%2], #32 \n"// v16 v17 = r00 r01
"fmla v11.4s, v27.4s, %20.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v18.4s, v19.4s}, [%2], #32 \n"// v18 v19 = r08 r09
"fadd v10.4s, v8.4s, v10.4s \n"
"fadd v11.4s, v9.4s, v11.4s \n"
"subs %w0, %w0, #1 \n"
"mov v8.16b, %21.16b \n"// v8 = _bias0
"mov v9.16b, %21.16b \n"// v9 = _bias0
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"bne 0b \n"
"sub %2, %2, #64 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
}
#else
if (nn > 0)
{
asm volatile(
// r0
"pld [%2, #256] \n"
"vld2.f32 {d20-d23}, [%2]! \n"// q10 q11 = r00 r01
"vmov q8, %q21 \n"
"pld [%2, #128] \n"
"vld2.f32 {d24-d25}, [%2] \n"// q12 = r08 x x
"0: \n"
"vmul.f32 q9, q10, %e14[0] \n"
"vmov d26, d25 \n"// q13 = r09 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r02
"vmla.f32 q8, q11, %e14[1] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r03
"vmla.f32 q9, q14, %f14[0] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r04
"vmla.f32 q8, q15, %f14[1] \n"
// r1
"pld [%3, #256] \n"
"vld2.f32 {d20-d23}, [%3]! \n"// q10 q11 = r10 r11
"vmla.f32 q9, q14, %e15[0] \n"
"pld [%3, #128] \n"
"vld2.f32 {d24-d25}, [%3] \n"// q12 = r18 x x
"vmla.f32 q8, q10, %e15[1] \n"
"vmov d26, d25 \n"// q13 = r19 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r12
"vmla.f32 q9, q11, %f15[0] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r13
"vmla.f32 q8, q14, %f15[1] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r14
"vmla.f32 q9, q15, %e16[0] \n"
// r2
"pld [%4, #256] \n"
"vld2.f32 {d20-d23}, [%4]! \n"// q10 q11 = r20 r21
"vmla.f32 q8, q14, %e16[1] \n"
"pld [%4, #128] \n"
"vld2.f32 {d24-d25}, [%4] \n"// q12 = r28 x x
"vmla.f32 q9, q10, %f16[0] \n"
"vmov d26, d25 \n"// q13 = r29 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r22
"vmla.f32 q8, q11, %f16[1] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r23
"vmla.f32 q9, q14, %e17[0] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r24
"vmla.f32 q8, q15, %e17[1] \n"
// r3
"pld [%5, #256] \n"
"vld2.f32 {d20-d23}, [%5]! \n"// q10 q11 = r30 r31
"vmla.f32 q9, q14, %f17[0] \n"
"pld [%5, #128] \n"
"vld2.f32 {d24-d25}, [%5] \n"// q12 = r38 x x
"vmla.f32 q8, q10, %f17[1] \n"
"vmov d26, d25 \n"// q13 = r39 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r32
"vmla.f32 q9, q11, %e18[0] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r33
"vmla.f32 q8, q14, %e18[1] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r34
"vmla.f32 q9, q15, %f18[0] \n"
// r4
"pld [%6, #256] \n"
"vld2.f32 {d20-d23}, [%6]! \n"// q10 q11 = r40 r41
"vmla.f32 q8, q14, %f18[1] \n"
"pld [%6, #128] \n"
"vld2.f32 {d24-d25}, [%6] \n"// q12 = r48 x x
"vmla.f32 q9, q10, %e19[0] \n"
"vmov d26, d25 \n"// q13 = r49 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r42
"vmla.f32 q8, q11, %e19[1] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r43
"vmla.f32 q9, q14, %f19[0] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r44
"vmla.f32 q8, q15, %f19[1] \n"
// r0
"pld [%2, #256] \n"
"vld2.f32 {d20-d23}, [%2]! \n"// q10 q11 = r00 r01
"vmla.f32 q9, q14, %e20[0] \n"
"pld [%2, #128] \n"
"vld2.f32 {d24-d25}, [%2] \n"// q12 = r08 x x
"vadd.f32 q9, q8, q9 \n"
"vmov q8, %q21 \n"
"subs %0, #1 \n"
"vst1.f32 {d18-d19}, [%1]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = bias0;
#if __ARM_NEON
// TODO neon assembly optimize
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
sum += r0[4] * k0[4];
sum += r1[4] * k1[4];
sum += r2[4] * k2[4];
sum += r3[4] * k3[4];
sum += r4[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
#endif
*outptr = sum;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
mixed_tentusscher_myo_epi_2004_S1_18.c | // Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S1_18.h"
GET_CELL_MODEL_DATA(init_cell_model_data)
{
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Initial conditions for TenTusscher epicardium
else
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6407442866583,0.00127024730863006,0.781477837871060,0.781226285372551,0.000173058844459830,0.485844316142820,0.00292517461971129,0.999998371825952,1.91031873007277e-08,1.87288135192733e-05,0.999773522474666,1.00766286802375,0.999999451356628,3.16576129409975e-05,0.737961690357158,10.2441215797546,139.210514590526};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
}
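/* The extra_data mask is a per-cell uint32_t array provided by the caller
   (e.g. through a mask function in the solver configuration):
       mapping[cell] == 0 -> myocardium model, mapping[cell] != 0 -> epicardium.
   A minimal hypothetical setup for a tissue strip (num_cells and
   num_myo_cells are illustrative names, not part of this file):

       uint32_t *mapping = (uint32_t*) malloc(num_cells * sizeof(uint32_t));
       for (uint32_t c = 0; c < num_cells; c++)
           mapping[c] = (c < num_myo_cells) ? 0u : 1u; // first block myo, rest epi
*/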
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++)
{
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = (uint32_t )i;
for (int j = 0; j < num_steps; ++j)
{
if (mapping[i] == 0)
solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
else
solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
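// Note: despite the "RHS" naming, RHS_cpu_myo/RHS_cpu_epi store *updated*
// state values in rDY_, not time derivatives: gate variables advance with the
// exact exponential (Rush-Larsen) step x_new = x_inf - (x_inf - x)*exp(-dt/tau)
// and the ionic concentrations are integrated in place with forward Euler.
// That is why solve_model_ode_cpu_* simply copies rDY back into sv.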
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_myo(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
sItot = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
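// Rapid-buffering step: conservation of total Ca with an instantaneous
// buffer yields a quadratic x*x + b*x - c = 0 for the free concentration,
// solved in closed form as x = (sqrt(b*b + 4*c) - b)/2, first for CaSR
// and then for Cai.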
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
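// The gate updates below use the Rush-Larsen scheme: each gate obeys
// dy/dt = (y_inf - y)/tau, whose exact solution over one step is
// y(t+dt) = y_inf - (y_inf - y(t))*exp(-dt/tau); this is unconditionally
// stable for the linear gating equations.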
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
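// fCa and g reuse the precomputed exp(-dt/tau) factors and are clamped so
// they cannot increase while the membrane is depolarized (svolt > -37 mV),
// as in the original ten Tusscher 2004 formulation.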
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage with an explicit (forward) Euler step
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
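// Note: RHS_cpu_epi returns the *updated* state in rDY (gates via
// Rush-Larsen, voltage and concentrations via explicit updates), so one
// integration step reduces to a single evaluation plus a copy back into sv.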
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_epi(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
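// Hard-coded parameter vector overriding the defaults above (presumably
// produced by an external fitting/calibration step); ordering: GNa, GbNa,
// GCaL, GbCa, Gto, Gkr, Gks, GK1, GpK, knak, knaca, Vmaxup, GpCa, arel,
// crel, Vleak.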
real parameters []={14.5383636643555, 0.000359007183612285, 0.000154135859579797,
                    0.000217532604523131, 0.265156052763393, 0.186639850277223,
                    0.149365610424309, 3.43320580539409, 0.0166941723782826,
                    1.45123160724562, 1094.13527370174, 0.000494385096732911,
                    0.269171393030809, 0.0183256017779276, 0.00468024174172971,
                    1.50869252254344e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
sItot = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage with an explicit (forward) Euler step
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
integration_point_to_node_transformation_utility.h | //    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ \
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Michael Andre, https://github.com/msandre
//
#if !defined(KRATOS_INTEGRATION_POINT_TO_NODE_TRANSFORMATION_UTILITY_H_INCLUDED)
#define KRATOS_INTEGRATION_POINT_TO_NODE_TRANSFORMATION_UTILITY_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/element.h"
#include "includes/model_part.h"
#include "utilities/openmp_utils.h"
// Application includes
#include "fluid_dynamics_application_variables.h"
namespace Kratos
{
///@addtogroup FluidDynamicsApplication
///@{
///@name Kratos Classes
///@{
/**
* @brief A utility for transforming values on integration points to nodes.
*
* This utility was created to transform vorticity and q-criterion variables
* from the integration points where they are computed to the nodes for
* visualization. The utility is designed to work in both 2D and 3D with and
* without the MPI library. Each nodal value is computed as a weighted average
* of the neighboring elements.
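 *
 * A minimal usage sketch (VORTICITY is assumed here as the example target;
 * any nodal-historical variable computed on integration points works):
 * @code
 * IntegrationPointToNodeTransformationUtility<3> Utility;
 * Utility.TransformFromIntegrationPointsToNodes(VORTICITY, rModelPart);
 * @endcode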
*/
template<unsigned int TDim, unsigned int TNumNodes = TDim + 1>
class IntegrationPointToNodeTransformationUtility {
public:
///@name Type Definitions
///@{
/// Pointer definition of IntegrationPointToNodeTransformationUtility
KRATOS_CLASS_POINTER_DEFINITION(IntegrationPointToNodeTransformationUtility);
template<class TVariableType>
void TransformFromIntegrationPointsToNodes(const Variable<TVariableType>& rVariable,
ModelPart& rModelPart) const
{
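// Three passes: (1) zero the target variable and NODAL_AREA on every node,
// (2) scatter each element's first integration-point value to its nodes,
// weighted by an equal share of the element volume, (3) divide by the
// accumulated NODAL_AREA to finish the weighted average.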
#pragma omp parallel
{
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
itNode->FastGetSolutionStepValue(rVariable) = rVariable.Zero();
itNode->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
}
}
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd);
std::vector<TVariableType> ValuesOnIntPoint;
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
itElem->GetValueOnIntegrationPoints(rVariable,ValuesOnIntPoint,
rModelPart.GetProcessInfo());
Element::GeometryType& rGeom = itElem->GetGeometry();
const double Weight = rGeom.Volume() / (double) TNumNodes;
for (unsigned int iNode = 0; iNode < rGeom.size(); iNode++)
{
rGeom[iNode].SetLock();
rGeom[iNode].FastGetSolutionStepValue(rVariable) += Weight * ValuesOnIntPoint[0];
rGeom[iNode].FastGetSolutionStepValue(NODAL_AREA) += Weight;
rGeom[iNode].UnSetLock();
}
}
}
rModelPart.GetCommunicator().AssembleCurrentData(rVariable);
rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
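// In MPI runs AssembleCurrentData sums the partial nodal contributions
// across partition interfaces; in a serial build these calls are no-ops.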
#pragma omp parallel
{
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
const double NodalArea = itNode->FastGetSolutionStepValue(NODAL_AREA);
itNode->FastGetSolutionStepValue(rVariable) /= NodalArea;
}
}
}
}; // class IntegrationPointToNodeTransformationUtility
///@}
///@} // Fluid Dynamics Application group
} // namespace Kratos
#endif // KRATOS_INTEGRATION_POINT_TO_NODE_TRANSFORMATION_UTILITY_H_INCLUDED defined
|
DRB037-truedepseconddimension-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outermost loop can be parallelized in this program.
The inner loop has true dependence.
Data race pair: b[i][j]@63:7 vs. b[i][j-1]@63:15
*/
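/*
  The carried dependence, spelled out: within a row i, iteration j reads
  b[i][j-1], which iteration j-1 writes. If the j iterations are spread
  across threads, that write/read pair is unordered, e.g.

    thread A: b[i][j]   = b[i][j-1];   // writes b[i][j]
    thread B: b[i][j+1] = b[i][j];     // concurrently reads b[i][j]

  so only the i loop can be safely parallelized.
*/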
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
double b[1000][1000];
int main(int argc,char *argv[])
{
int i;
int j;
int n = 1000;
int m = 1000;
#pragma omp parallel for private (i,j)
for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= m - 1; j += 1) {
b[i][j] = (i + j);
}
}
#pragma omp parallel for private (i,j) firstprivate (n,m)
for (i = 0; i <= n - 1; i += 1) {
for (j = 1; j <= m - 1; j += 1) {
b[i][j] = b[i][j - 1];
}
}
printf("b[500][500]=%f\n",b[500][500]);
return 0;
}
|