source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
Shapes.h | /*************************************************************************
> File Name: Shapes.h
> Author: Yibo Lin
> Mail: yibolin@utexas.edu
> Created Time: Thu 06 Nov 2014 09:04:57 AM CST
************************************************************************/
#ifndef SIMPLEMPL_SHAPES_H
#define SIMPLEMPL_SHAPES_H
#include <iostream>
#include <string>
#include <cmath> // std::abs
#include <boost/version.hpp>
#if (BOOST_VERSION/100)%1000 > 55
// this is to fix the problem in boost 1.57.0 (1.55.0 works fine)
// it reports problem to find abs
namespace boost { namespace polygon {
using std::abs;
}} // namespace boost // namespace polygon
#endif
#include <boost/cstdint.hpp>
//#include <boost/polygon/polygon.hpp>
#include <boost/geometry.hpp>
// use adapted boost.polygon in boost.geometry, which is compatible to rtree
#include <boost/geometry/geometries/adapted/boost_polygon.hpp>
#include <boost/geometry/index/rtree.hpp>
#include "GeometryApi.h"
#include "Msg.h"
SIMPLEMPL_BEGIN_NAMESPACE
namespace gtl = boost::polygon;
namespace bg = boost::geometry;
namespace bgi = bg::index;
using boost::int32_t;
using boost::int64_t;
using gtl::rectangle_concept;
using gtl::polygon_90_concept;
using gtl::polygon_90_set_concept;
using gtl::point_data;
using gtl::segment_data;
using gtl::rectangle_data;
using gtl::polygon_90_data;
using gtl::polygon_90_set_data;
using namespace gtl::operators;
/// =========================================================================================
/// All different shapes used in SimpleMPL
/// One thing should raise your caution: **inheritance and memory usage.
/// If a class contains virtual functions, it will add additional 8 bytes to the class. **
/// In class Shape, we need at most 8 bytes for data member.
/// In class Rectangle, 4 coordinates take 16 bytes.
/// If we keep a virtual table in Shape, say virtual destructor,
/// then the size of Rectangle increases to 32 bytes.
/// But actually the data only takes 24 bytes.
/// I know it is usually better to have a virtual destructor in the base
/// class, but if we make sure there is no deletion to base class pointer
/// for derived class, it should be fine without it.
/// Class Shape does not necessarily need virtual functions.
/// ** As a consequence, no deletion of Shape pointer for derived classes is allowed. **
/// =========================================================================================
/// @brief Base class holding the non-geometric attributes shared by every
/// SimpleMPL shape: the coloring result, the input layer, and the index of
/// the shape in the pattern array.
/// Intentionally has NO virtual functions (see the note above the class):
/// adding a vtable pointer would grow every derived Rectangle/Polygon by
/// 8 bytes. Consequently a derived object must never be deleted through a
/// Shape pointer.
class Shape
{
public:
/// default constructor
Shape() {this->initialize();}
/// copy constructor
Shape(Shape const& rhs) {this->copy(rhs);}
/// assignment
Shape& operator=(Shape const& rhs)
{
if (this != &rhs)
this->copy(rhs);
return *this;
}
/// whether should I add virtual here
/// a trade-off between safety and memory
~Shape() {}
#ifdef DEBUG
/// debug-only serial number assigned at construction
long internal_id() {return m_internal_id;}
#endif
/// accessors; color/layer use -1 (set in initialize()) as "unassigned"
int8_t color() const {return m_color;}
void color(int8_t c) {m_color = c;}
int32_t layer() const {return m_layer;}
void layer(int32_t l) {m_layer = l;}
uint32_t pattern_id() const {return m_pattern_id;}
void pattern_id(uint32_t p) {m_pattern_id = p;}
private:
/// set every attribute to its "unset" value; pattern id uses the max
/// uint32_t as the invalid marker
void initialize()
{
m_color = m_layer = -1;
m_pattern_id = std::numeric_limits<uint32_t>::max();
#ifdef DEBUG
#ifdef _OPENMP
/// serialize id generation when shapes are built from OpenMP threads
#pragma omp critical
#endif
m_internal_id = generate_id();
#endif
}
/// copy all attributes (used by both copy ctor and operator=)
void copy(Shape const& rhs)
{
this->m_color = rhs.m_color;
this->m_layer = rhs.m_layer;
this->m_pattern_id = rhs.m_pattern_id;
#ifdef DEBUG
this->m_internal_id = rhs.m_internal_id;
#endif
}
/// monotonically increasing counter; first call returns 0.
/// NOTE(review): the atomic protects only the increment, not the
/// following read of cnt — callers rely on the omp critical in
/// initialize() for a consistent value.
static long generate_id()
{
static long cnt = -1;
mplAssert(cnt < std::numeric_limits<long>::max());
#ifdef _OPENMP
#pragma omp atomic
#endif
cnt += 1;
return cnt;
}
#ifdef DEBUG
long m_internal_id; ///< internal id
#endif
protected:
/// color and layer share one 32-bit word via bitfields to keep
/// sizeof(Shape) at 8 bytes; assigning -1 marks both "unset"
int32_t m_color : 4; ///< color, 4-bit is enough
int32_t m_layer : 28; ///< input layer, 28-bit is enough
///< actually 20-bit is already enough, but a class needs a least 8 bytes
uint32_t m_pattern_id; ///< index in the pattern array
};
/// Axis-aligned rectangle shape used by SimpleMPL.
/// Geometry is inherited from boost.polygon's rectangle_data; the SimpleMPL
/// attributes (color, layer, pattern id) are inherited from Shape.
template <typename T>
class Rectangle : public rectangle_data<T>, public Shape
{
public:
typedef T coordinate_type;
typedef rectangle_data<coordinate_type> base_type;
typedef Shape shape_base_type;
typedef rectangle_concept geometry_type; // concept tag consumed by boost.polygon traits
typedef point_data<coordinate_type> point_type;
using typename base_type::interval_type;
/// constructors forward to the two bases; attributes start "unset"
Rectangle() : base_type(), shape_base_type() {}
Rectangle(interval_type const& hor, interval_type const& ver) : base_type(hor, ver), shape_base_type() {}
Rectangle(coordinate_type xl, coordinate_type yl, coordinate_type xh, coordinate_type yh) : base_type(xl, yl, xh, yh), shape_base_type() {}
/// copy both the geometry and the attributes
Rectangle(Rectangle const& rhs) : base_type(rhs), shape_base_type(rhs) {}
Rectangle& operator=(Rectangle const& rhs)
{
if (this == &rhs) // nothing to do on self-assignment
return *this;
this->base_type::operator=(rhs);
this->shape_base_type::operator=(rhs);
return *this;
}
/// non-virtual on purpose (see Shape); never delete through a base pointer
~Rectangle() {}
/// render as "(xl, yl, xh, yh)"
operator std::string() const
{
std::ostringstream oss;
this->print(oss);
return oss.str();
}
void print(std::ostream& os) const
{
os << "(" << gtl::xl(*this) << ", " << gtl::yl(*this)
<< ", " << gtl::xh(*this) << ", " << gtl::yh(*this) << ")";
}
friend std::ostream& operator<<(std::ostream& os, Rectangle const& rhs)
{
rhs.print(os);
return os;
}
};
/// Rectilinear (Manhattan) polygon shape used by SimpleMPL.
/// Geometry is inherited from boost.polygon's polygon_90_data; the SimpleMPL
/// attributes (color, layer, pattern id) are inherited from Shape.
template <typename T>
class Polygon : public polygon_90_data<T>, public Shape
{
public:
typedef T coordinate_type;
typedef polygon_90_data<coordinate_type> base_type;
typedef Shape shape_base_type;
typedef Rectangle<coordinate_type> rectangle_type;
typedef point_data<coordinate_type> point_type;
using typename base_type::geometry_type;
using typename base_type::compact_iterator_type;
using typename base_type::iterator_type;
using typename base_type::area_type;
/// construct an empty polygon with "unset" attributes
Polygon() : base_type(), shape_base_type() {}
/// copy both the geometry and the attributes
Polygon(Polygon const& rhs) : base_type(rhs), shape_base_type(rhs) {}
Polygon& operator=(Polygon const& rhs)
{
if (this == &rhs) // nothing to do on self-assignment
return *this;
this->base_type::operator=(rhs);
this->shape_base_type::operator=(rhs);
return *this;
}
/// non-virtual on purpose (see Shape); never delete through a base pointer
~Polygon() {}
/// render as "((x0, y0)(x1, y1)...)"
operator std::string() const
{
std::ostringstream oss;
this->print(oss);
return oss.str();
}
void print(std::ostream& os) const
{
os << "(";
for (iterator_type vit = this->begin(), vite = this->end(); vit != vite; ++vit)
os << "(" << vit->x() << ", " << vit->y() << ")";
os << ")";
}
friend std::ostream& operator<<(std::ostream& os, Polygon const& rhs)
{
rhs.print(os);
return os;
}
};
SIMPLEMPL_END_NAMESPACE
#endif
|
test.c |
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (1024*3)
#define M (16*32)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
double A[M][N], B[M][N], C[N], D[N], E[N];
double S[M];
double p[2];
int main(void) {
/*
 * Driver for "omp target teams distribute parallel for" tests with a
 * nested "parallel for" inside each leaf iteration.  The test skeleton
 * (NESTED_PARALLEL_FOR / TESTD / VERIFY / SUMS) comes from defines.h,
 * which is re-included after each redefinition of
 * NESTED_PARALLEL_FOR_CLAUSES so that the clause under test is spliced
 * into the combined construct.
 */
check_offloading();
INIT();
/* Probe whether target regions actually offload or fall back to host. */
int cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
{
cpuExec = omp_is_initial_device();
}
/* Geometry: 16 teams x 32 threads = 512 leaf indices (idx); the nested
 * parallel regions request threads[0] = th-1 = 31 threads. */
int tms = 16;
int th = 32;
int threads[1]; threads[0] = th-1;
//
// Test: proc_bind clause
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(master)
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i]; \
B[idx][i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(close)
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i]; \
B[idx][i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(spread)
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i]; \
B[idx][i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: private, shared clauses on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES private(p,q) shared(A,B,C,D,E)
#include "defines.h"
NESTED_PARALLEL_FOR(
double p = 2; \
double q = 4; \
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[idx][i] += p; \
B[idx][i] += q; \
}
,
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) 6 + SUMS * (N/2*(N+1))))
//
// Test: firstprivate clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p,q)
#include "defines.h"
NESTED_PARALLEL_FOR(
double p = -4; \
double q = 4; \
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i] + p; \
B[idx][i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
}
,
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: lastprivate clause on omp target teams distribute parallel for with nested parallel.
//
TESTD("omp target teams distribute parallel for num_teams(tms) num_threads(th)", {
for (int idx = 0; idx < tms*th; idx++) {
double q0[1];
double q1[1];
double q2[1];
double q3[1];
S[idx] = 0;
for (int i = 0; i < N; i++) {
A[idx][i] = B[idx][i] = 0;
}
_Pragma("omp parallel for lastprivate(q0) if(threads[0] > 1) num_threads(threads[0])")
for (int i = 0; i < N; i++) {
q0[0] = C[i] + D[i];
A[idx][i] += q0[0];
}
_Pragma("omp parallel for schedule(auto) lastprivate(q1) if(threads[0] > 1) num_threads(threads[0])")
for (int i = 0; i < N; i++) {
q1[0] = C[i] + D[i];
A[idx][i] += q1[0];
}
_Pragma("omp parallel for schedule(static) lastprivate(q2) if(threads[0] > 1) num_threads(threads[0])")
for (int i = 0; i < N; i++) {
q2[0] = D[i] + E[i];
B[idx][i] += q2[0];
}
_Pragma("omp parallel for schedule(static,9) lastprivate(q3) if(threads[0] > 1) num_threads(threads[0])")
for (int i = 0; i < N; i++) {
q3[0] = D[i] + E[i];
B[idx][i] += q3[0];
}
double tmp = q0[0] + q1[0] + q2[0] + q3[0];
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
}
}, VERIFY(0, tms*th, S[i], (double) 2 * (N + (N/2*(N+1))) ));
//
// Test: private clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES private(p)
#include "defines.h"
NESTED_PARALLEL_FOR(
double p[2]; \
p[0] = 2; p[1] = 4; \
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p[0] = C[i] + D[i]; \
p[1] = D[i] + E[i]; \
A[idx][i] += p[0]; \
B[idx][i] += p[1]; \
}
,
{
double tmp = p[0] + p[1];
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) 6 + SUMS * (N/2*(N+1))))
//
// Test: firstprivate clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p)
#include "defines.h"
NESTED_PARALLEL_FOR(
double p[2]; \
p[0] = -4; p[1] = 4; \
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i] + p[0]; \
B[idx][i] += D[i] + E[i] + p[1]; \
if (i == N-1) { \
p[0] += 6; \
p[1] += 9; \
} \
}
,
{
double tmp = p[0] + p[1];
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: collapse clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES collapse(2)
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[idx][i*3+j] += C[i*3+j] + D[i*3+j]; \
B[idx][i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: ordered clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES ordered
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
,
for (int i = 0; i < N; i++) { \
_Pragma("omp ordered") \
S[idx] += C[i] + D[i]; \
}
,
{
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: Ensure coalesced scheduling on GPU.
//
if (!cpuExec) {
TESTD("omp target teams distribute parallel for num_teams(tms) num_threads(th)", {
for (int idx = 0; idx < tms*th; idx++) {
S[idx] = 0;
for (int i = 0; i < 96; i++) {
A[idx][i] = 0;
}
_Pragma("omp parallel for num_threads(32)")
for (int i = 0; i < 96; i++) {
A[idx][i] += i - omp_get_thread_num();
}
_Pragma("omp parallel for schedule(auto) num_threads(32)")
for (int i = 0; i < 96; i++) {
A[idx][i] += i - omp_get_thread_num();
}
_Pragma("omp parallel for schedule(static,1) num_threads(32)")
for (int i = 0; i < 96; i++) {
A[idx][i] += i - omp_get_thread_num();
}
double tmp = 0;
for (int i = 0; i < 96; i++) {
tmp += A[idx][i];
}
S[idx] = tmp;
}
}, VERIFY(0, tms*th, S[i], (double) 3 * (32*32 + 64*32) ));
} else {
/* Host execution cannot exercise GPU scheduling; record as passed. */
DUMP_SUCCESS(1);
}
return 0;
}
|
keystore_fmt_plug.c | /* Java KeyStore cracker. Written by Dhiru Kholia <dhiru at openwall.com> and
* Narendra Kangralkar <narendrakangralkar at gmail.com>.
*
* Input Format: $keystore$target$data_length$data$hash$nkeys$keylength$keydata$keylength$keydata...
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru.kholia at gmail.com>
* and Narendra Kangralkar <narendrakangralkar at gmail.com> and it is hereby
* released to the general public under the following terms: *
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_keystore;
#elif FMT_REGISTERS_H
john_register_one(&fmt_keystore);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "memdbg.h"
#define FORMAT_LABEL "keystore"
#define FORMAT_NAME "Java KeyStore"
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 20
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define SZ 819200
static struct fmt_tests keystore_tests[] = {
{"$keystore$0$2126$feedfeed000000020000000100000001000f616e64726f696464656275676b65790000013c3ea72ab000000501308204fd300e060a2b060104012a021101010500048204e9e76fea55eed58e4257c253b670948abb18093fbbb667f00807560242f17a4b3cd8b90d0e2a5c6c96f758f45e0e2320039c10af4ecc95e56930fd85713318da506bb48fa586b5caf7c286cf3b66134cb0e13dcdbc665680fb1214d9db2405ccb297acdefd4f5f7cb1c1babd5b77414223b45ae11ab0ec0a2ce5423a6ab69f372adb79a38973a0fde89f9b1e8ef62de04a5e6b35008ce3191c350f98a98ed917ccfc3524f9a4786a3ab055cee25efb118f67d73cacfdd5a3f0ca04399d2b31acfffc63ab6b47f371ff879768ef84bc8c58bcfaab1539e6343cf7b81d0446f57abbeb84fb20b540616aabbfd4c823acb2124ea25538c7531609b72b8da90327a8a3845bcfd69d659a1a77c35efb0d62651e4178459dfde9e165edc6d52cc3d8fee78e3132346588b09e3d27e1400421d33e88748ed1c01af1dc6064a71c991e0322e72c55ed5bcd8c232048bddfecd299d4d9c296639866dd21ad073a4993733b44bac4d6a77eec05cda65d5d9ad0a42a5aa9d443e3ba7ea5744e7fdc2617f527cd9cf480bce033bd5eec6746b2a58328aeed26757664109e1046c93e2377db18c58c35828916f4a42964aae2fe75ad944896bd321ae92cd5723735b37f85250a635a8d1875d3efb2ffbcabc3602ea3b6952da060ec1d1c0a961b1a50836dee911a166e09a33d036d6ef7dc988545b580841945a8718b178bb06ef8e78c6703a496cf66990d57b696b2117922ee1855dff439b2bda3201b145fdb4533b7d2cfa22291a79bac67bb6b3d963dd4137b6208931f02c3ee30bfd0731443edadd5bfffec0147f5f2bd13930deace26fec0ebf0c1befe1294875fb9d8a08919fdc1697ec78d1b86c03a0db4e61bd6a9db6803fdd8e2547ead44bd48cf223b964b0c6903ede0fc0e1b7d02b83ba18ed649bc0e40896ff7cde1d092a9f30314da8fc67d113c79fe7046da75bc090b08b3f31a5d0feb33abab2c608e3afaca1521f2809ae79c14e5ab16d7fa319ddc4dbae61cf41bd15829055970f26361fc1ae22a15e401b25eb500411e70a3cacca38e0d59a6add6513c02d0e6a766303e231d8adf8368b1579e7d58a7d3a5981542c9b8fec0b1780713031fefa60d93755215cbbc34f27634537b6c4fe391578be1a3547fc97d1eeb3e8b11444e8ad99902911fba55034a2796d791039bb29bd193406f05b942f69d47a4a236a64f610e7808387586f4a96a84059e93b11355ecd9125e7a805503e41f4097893b043c7d539d76933515c8fbde11f2a69a6f47aebbac3ed29b0231b3a74ecc9a5421ad61c
995a039e44c0a8717dd6e5efbdc2f6ab8daefbc58867ca2e852780c66d1163a03662c34b5365983405093452bb004f78eb973a804edb1b4e8214ab982ed9c81992cc508d8852288fee4ced3af41cca7baaddb828830f3e7dd7c92610def60bbaf6a866e84ea81bd4e88a5b5a035b15b370f942af17f213706c681a59da20b150697c188edb4ac8b59b3babf9c895078f268940aa805c15a2712042c22ce5c44a62554d5f2efb6db179e1db29570b6b063d00349a0273277751e6adf32b6d36b02cb81025d80e620b61a418b0584441c087ce75ed03c871dfe8463a9a3641b036e849fd0fdc9b381ebe43e067353642f182d67ef6bef43463dc6b8d7abd035677b443440c7624d91baa11002e193d86a76974eef4f6fb44a8c440b73ddb323e9eb8f7fdd67aa368ce6aefdff1060e6a519d48b28718b1548e4665360f141d5e16027f0e7c41d07c582dd2a29fa55a00f000000010005582e353039000003113082030d308201f5a00302010202043e310348300d06092a864886f70d01010b05003037310b30090603550406130255533110300e060355040a1307416e64726f6964311630140603550403130d416e64726f6964204465627567301e170d3133303131353134343030385a170d3133303431353134343030385a3037310b30090603550406130255533110300e060355040a1307416e64726f6964311630140603550403130d416e64726f696420446562756730820122300d06092a864886f70d01010105000382010f003082010a02820101009116fccceb121c8951fb12992ef59393a8c2aeab4ec76a30a71d3aca40a277ab1500613c30bda5472bc15812bdbe9395b4a6009edaf94ca7427cd94ca840c0ac9d42ab8246a401628dbba7acb408738929b75f319d496e8594afd75423c07299ec195efce351b7f2b730ad5e61ab292a4783611cdad41139302ada3e239656c2ec842a59418efc711072e75193cfba1105a1980a631f4a513e4116a89806a47f8b308c03684e2ce83e03c40c438445143fa3fab756909e101f89410a35bb6e6a5cbdcef19d0359c8ed7862fe7ae7f81c32a9a75f72419f89eddbe4acc4373e45a390fd185ae3b28adb8445c4e38e30773acad396788428b0321936f241e905c50203010001a321301f301d0603551d0e041604148c2df598ae53bebe11c4e4696abc6cad6bce4286300d06092a864886f70d01010b05000382010100507e62f723154b2e818140fbc47547c8a600f97a580de244afdf6cdc02977aa7fb990c77a0d79d3ef53aadcf9d7705b385c365e3e06bf15de1a9d3f5c6b6b40fc4b629f763da8f12fc16a005b66026de2be8f1144d37ef14fc1c99dc13dd33fc750898a7ac9e2a12543402ba5021432a8453d38b4879a9573
6f65956d13d92d96b6f546b853c92f0cc51a98dcd233076ae285d5ed44601f1fe361974c74067eb263386fe8e085e8b20c3cd72768d4265bd9bf4937b2aeae3323c6289dfe75e820907ba38e85b3fc2ceb44e770b91babfdf1d003bbc56ed7066f97ba86e0648ff0874a31c1563d52f42f38005b3698f800be11257f405b185ca421113072f8531$a8ab7a46059faddb183f66d4aef78f47911c88aa$1$1281$308204fd300e060a2b060104012a021101010500048204e9e76fea55eed58e4257c253b670948abb18093fbbb667f00807560242f17a4b3cd8b90d0e2a5c6c96f758f45e0e2320039c10af4ecc95e56930fd85713318da506bb48fa586b5caf7c286cf3b66134cb0e13dcdbc665680fb1214d9db2405ccb297acdefd4f5f7cb1c1babd5b77414223b45ae11ab0ec0a2ce5423a6ab69f372adb79a38973a0fde89f9b1e8ef62de04a5e6b35008ce3191c350f98a98ed917ccfc3524f9a4786a3ab055cee25efb118f67d73cacfdd5a3f0ca04399d2b31acfffc63ab6b47f371ff879768ef84bc8c58bcfaab1539e6343cf7b81d0446f57abbeb84fb20b540616aabbfd4c823acb2124ea25538c7531609b72b8da90327a8a3845bcfd69d659a1a77c35efb0d62651e4178459dfde9e165edc6d52cc3d8fee78e3132346588b09e3d27e1400421d33e88748ed1c01af1dc6064a71c991e0322e72c55ed5bcd8c232048bddfecd299d4d9c296639866dd21ad073a4993733b44bac4d6a77eec05cda65d5d9ad0a42a5aa9d443e3ba7ea5744e7fdc2617f527cd9cf480bce033bd5eec6746b2a58328aeed26757664109e1046c93e2377db18c58c35828916f4a42964aae2fe75ad944896bd321ae92cd5723735b37f85250a635a8d1875d3efb2ffbcabc3602ea3b6952da060ec1d1c0a961b1a50836dee911a166e09a33d036d6ef7dc988545b580841945a8718b178bb06ef8e78c6703a496cf66990d57b696b2117922ee1855dff439b2bda3201b145fdb4533b7d2cfa22291a79bac67bb6b3d963dd4137b6208931f02c3ee30bfd0731443edadd5bfffec0147f5f2bd13930deace26fec0ebf0c1befe1294875fb9d8a08919fdc1697ec78d1b86c03a0db4e61bd6a9db6803fdd8e2547ead44bd48cf223b964b0c6903ede0fc0e1b7d02b83ba18ed649bc0e40896ff7cde1d092a9f30314da8fc67d113c79fe7046da75bc090b08b3f31a5d0feb33abab2c608e3afaca1521f2809ae79c14e5ab16d7fa319ddc4dbae61cf41bd15829055970f26361fc1ae22a15e401b25eb500411e70a3cacca38e0d59a6add6513c02d0e6a766303e231d8adf8368b1579e7d58a7d3a5981542c9b8fec0b1780713031fefa60d93755215cbbc34f27634537b6c4fe391578be1a3547fc97d1e
eb3e8b11444e8ad99902911fba55034a2796d791039bb29bd193406f05b942f69d47a4a236a64f610e7808387586f4a96a84059e93b11355ecd9125e7a805503e41f4097893b043c7d539d76933515c8fbde11f2a69a6f47aebbac3ed29b0231b3a74ecc9a5421ad61c995a039e44c0a8717dd6e5efbdc2f6ab8daefbc58867ca2e852780c66d1163a03662c34b5365983405093452bb004f78eb973a804edb1b4e8214ab982ed9c81992cc508d8852288fee4ced3af41cca7baaddb828830f3e7dd7c92610def60bbaf6a866e84ea81bd4e88a5b5a035b15b370f942af17f213706c681a59da20b150697c188edb4ac8b59b3babf9c895078f268940aa805c15a2712042c22ce5c44a62554d5f2efb6db179e1db29570b6b063d00349a0273277751e6adf32b6d36b02cb81025d80e620b61a418b0584441c087ce75ed03c871dfe8463a9a3641b036e849fd0fdc9b381ebe43e067353642f182d67ef6bef43463dc6b8d7abd035677b443440c7624d91baa11002e193d86a76974eef4f6fb44a8c440b73ddb323e9eb8f7fdd67aa368ce6aefdff1060e6a519d48b28718b1548e4665360f141d5e16027f0e7c41d07c582dd2a29fa55a00f", "android"},
{NULL}
};
/* Per-candidate buffers; allocated in init() and sized by
 * max_keys_per_crypt (scaled up under OpenMP). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* Parsed form of one "$keystore$..." line; filled by get_salt(). */
static struct custom_salt {
int target;
int data_length; /* bytes used in data[] */
int count; /* number of keys in the store (only 1 is supported) */
int keysize; /* bytes used in keydata[] */
unsigned char data[SZ]; /* raw keystore image; hashed in crypt_all() */
unsigned char keydata[SZ]; /* decoded key blob; not used by the visible hashing code */
} *cur_salt;
/* To guard against tampering with the keystore, we append a keyed
* hash with a bit of whitener. */
/* To guard against tampering with the keystore, we append a keyed
 * hash with a bit of whitener. */
/* Seed *ctxp with the password expanded to two bytes per character
 * (high byte first, mirroring Java's UTF-16BE chars; for 7-bit ASCII the
 * high byte is 0) followed by the 16-byte "Mighty Aphrodite" whitener.
 * NOTE(review): for bytes >= 0x80 the high byte depends on char
 * signedness — presumably matches the reference implementation; confirm.
 * Fix: hoist strlen() out of the loop condition — it was re-evaluated on
 * every iteration (and twice more afterwards). */
static inline void getPreKeyedHash(char *password, SHA_CTX *ctxp)
{
	int i, j;
	int len = strlen(password); /* <= PLAINTEXT_LENGTH, so int is safe */
	unsigned char passwdBytes[PLAINTEXT_LENGTH * 2];
	char *magic = "Mighty Aphrodite";
	for (i = 0, j = 0; i < len; i++) {
		passwdBytes[j++] = (password[i] >> 8);
		passwdBytes[j++] = password[i];
	}
	SHA1_Init(ctxp);
	SHA1_Update(ctxp, passwdBytes, len * 2);
	SHA1_Update(ctxp, magic, 16);
}
/* One-time format setup: size the candidate batch and allocate the
 * saved_key / crypt_out arrays. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
/* min_keys is scaled by the thread count only; max_keys is additionally
 * scaled by OMP_SCALE so each thread gets a large batch per crypt_all(). */
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_tiny(sizeof(*saved_key) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Syntactic check of one input line.
 * Layout: $keystore$target$data_length$data$hash$nkeys$keylength$keydata...
 * Bug fix: the data_length field was never bounded, yet get_salt() copies
 * that many decoded bytes into cs.data[SZ] — a crafted line could overflow
 * the salt buffer.  The key-length field was already checked against SZ;
 * apply the same bound (plus negative guards) to data_length. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	char *ctcopy;
	char *keeptr;
	int target;
	int v;
	if (strncmp(ciphertext, "$keystore$", 10) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 10;
	if ((p = strtok(ctcopy, "$")) == NULL) /* target */
		goto bail;
	target = atoi(p);
	if (target != 1 && target != 0)
		goto bail;
	if ((p = strtok(NULL, "$")) == NULL) /* data length */
		goto bail;
	v = atoi(p);
	if (v < 0 || v > SZ) /* cs.data[] in get_salt() holds at most SZ bytes */
		goto bail;
	if ((p = strtok(NULL, "$")) == NULL) /* data */
		goto bail;
	if (strlen(p) != v * 2)
		goto bail;
	if ((p = strtok(NULL, "$")) == NULL) /* hash */
		goto bail;
	if ((p = strtok(NULL, "$")) == NULL) /* number of keys */
		goto bail;
	/* currently we support only 1 key */
	if (atoi(p) != 1)
		goto bail;
	if ((p = strtok(NULL, "$")) == NULL) /* key length */
		goto bail;
	v = atoi(p);
	if (v < 0 || v > SZ) /* cs.keydata[] holds at most SZ bytes */
		goto bail;
	if ((p = strtok(NULL, "$")) == NULL) /* key data */
		goto bail;
	if (strlen(p) != v * 2)
		goto bail;
	MEM_FREE(keeptr);
	return 1;
bail:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse a valid()-screened line into a custom_salt.
 * Returns a pointer to static storage; the caller copies SALT_SIZE bytes
 * out of it.  Assumes valid() already vetted field presence and lengths
 * (in particular that data_length and keysize fit the SZ buffers — TODO
 * confirm valid() enforces the data_length bound). */
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
int i;
/* NOTE: do we need dynamic allocation because of underlying large object size? */
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += 10; /* skip over "$keystore$" */
p = strtok(ctcopy, "$");
cs.target = atoi(p);
p = strtok(NULL, "$");
cs.data_length = atoi(p);
p = strtok(NULL, "$");
/* hex-decode the keystore image via the atoi16 nibble table */
for (i = 0; i < cs.data_length; i++)
cs.data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtok(NULL, "$"); /* skip hash */
p = strtok(NULL, "$");
cs.count = atoi(p);
p = strtok(NULL, "$");
cs.keysize = atoi(p);
/* hex-decode the key blob the same way */
for (i = 0; i < cs.keysize; i++)
cs.keydata[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
/* Extract the 20-byte SHA-1 target digest (the 4th '$'-separated field)
 * from the ciphertext.  Returns a pointer to static storage. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *copy = strdup(ciphertext);
	char *keeptr = copy;
	char *p;
	int i;
	copy += 10; /* skip over "$keystore$" */
	/* step over target, data_length and data to reach the hash field */
	p = strtok(copy, "$");
	for (i = 0; i < 3; i++)
		p = strtok(NULL, "$");
	/* hex-decode the digest */
	for (i = 0; i < BINARY_SIZE; i++, p += 2)
		out[i] = (atoi16[ARCH_INDEX(p[0])] << 4) |
		         atoi16[ARCH_INDEX(p[1])];
	MEM_FREE(keeptr);
	return out;
}
/* Partial-hash accessors: expose the low 4..27 bits of the first 32-bit
 * word of each computed digest for the cracker's hash-table lookups. */
static int get_hash_0(int index)
{
	return crypt_out[index][0] & 0xf;
}
static int get_hash_1(int index)
{
	return crypt_out[index][0] & 0xff;
}
static int get_hash_2(int index)
{
	return crypt_out[index][0] & 0xfff;
}
static int get_hash_3(int index)
{
	return crypt_out[index][0] & 0xffff;
}
static int get_hash_4(int index)
{
	return crypt_out[index][0] & 0xfffff;
}
static int get_hash_5(int index)
{
	return crypt_out[index][0] & 0xffffff;
}
static int get_hash_6(int index)
{
	return crypt_out[index][0] & 0x7ffffff;
}
/* Install the salt selected by the cracker loop; read by crypt_all(). */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt*)salt;
}
/* Hash every queued candidate against the current salt:
 * SHA-1(expanded password || "Mighty Aphrodite" || keystore data),
 * one digest per index into crypt_out.
 * The conditional compilation keeps the loop header only when more than
 * one key per crypt is possible; otherwise the block runs once at index 0. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (index = 0; index < count; index++)
#endif
{
SHA_CTX ctx;
getPreKeyedHash(saved_key[index], &ctx);
SHA1_Update(&ctx, cur_salt->data, cur_salt->data_length);
SHA1_Final((unsigned char*)crypt_out[index], &ctx);
}
return count;
}
/* Cheap screen: does any computed digest match the target's first 32-bit
 * word?  cmp_one() later verifies the full digest.  The loop header is
 * compiled out when only one key per crypt is possible. */
static int cmp_all(void *binary, int count)
{
int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (; index < count; index++)
#endif
if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
return 1;
return 0;
}
/* Full comparison of the 20-byte digest for one candidate. */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* cmp_one() already compared every BINARY_SIZE byte, so nothing is left
 * to verify here. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Store one candidate password, silently truncated to PLAINTEXT_LENGTH
 * and always NUL-terminated. */
static void keystore_set_key(char *key, int index)
{
	int len = strlen(key);
	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}
/* Return the stored plaintext candidate for `index`. */
static char *get_key(int index)
{
return saved_key[index];
}
/* Format descriptor registered with John the Ripper.  Field order is
 * positional and follows struct fmt_main: the params block first, then
 * the methods table (fmt_default_* are the framework's no-op handlers). */
struct fmt_main fmt_keystore = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
/* FIXME: report cur_salt->data_length as tunable cost? */
{ NULL },
#endif
keystore_tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
set_salt,
keystore_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
cp-tree.h | /* Definitions for C++ parsing and type checking.
Copyright (C) 1987-2017 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_CP_TREE_H
#define GCC_CP_TREE_H
#include "tm.h"
#include "hard-reg-set.h"
#include "function.h"
/* In order for the format checking to accept the C++ front end
diagnostic framework extensions, you must include this file before
diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE
in c-common.h. */
#undef GCC_DIAG_STYLE
#define GCC_DIAG_STYLE __gcc_cxxdiag__
#if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H)
#error \
In order for the format checking to accept the C++ front end diagnostic \
framework extensions, you must include this file before diagnostic-core.h and \
c-common.h, not after.
#endif
#include "c-family/c-common.h"
#include "diagnostic.h"
/* A tree node, together with a location, so that we can track locations
(and ranges) during parsing.
The location is redundant for node kinds that have locations,
but not all node kinds do (e.g. constants, and references to
params, locals, etc), so we stash a copy here. */
class cp_expr
{
public:
/* Empty expression: no tree node, unknown location.  */
cp_expr () :
m_value (NULL), m_loc (UNKNOWN_LOCATION) {}
/* Wrap VALUE, stashing whatever location the node itself carries
   (may be UNKNOWN_LOCATION for node kinds without one).  */
cp_expr (tree value) :
m_value (value), m_loc (EXPR_LOCATION (m_value)) {}
/* Wrap VALUE with an explicitly-supplied location.  */
cp_expr (tree value, location_t loc):
m_value (value), m_loc (loc) {}
cp_expr (const cp_expr &other) :
m_value (other.m_value), m_loc (other.m_loc) {}
/* Implicit conversions to tree. */
operator tree () const { return m_value; }
tree & operator* () { return m_value; }
tree & operator-> () { return m_value; }
tree get_value () const { return m_value; }
location_t get_location () const { return m_loc; }
/* Endpoints of the source range recorded for this expression, looked
   up through the global line_table.  */
location_t get_start () const
{
source_range src_range = get_range_from_loc (line_table, m_loc);
return src_range.m_start;
}
location_t get_finish () const
{
source_range src_range = get_range_from_loc (line_table, m_loc);
return src_range.m_finish;
}
/* Update both the node's own location (where the node kind stores one)
   and the stashed copy, keeping them in sync.  */
void set_location (location_t loc)
{
protected_set_expr_location (m_value, loc);
m_loc = loc;
}
void set_range (location_t start, location_t finish)
{
set_location (make_location (m_loc, start, finish));
}
private:
tree m_value;
location_t m_loc;
};
/* Compare a cp_expr against a plain tree by identity of the wrapped
   node; the stashed location plays no part in equality.  */
inline bool
operator == (const cp_expr &lhs, tree rhs)
{
  return rhs == lhs.get_value ();
}
#include "name-lookup.h"
/* Usage of TREE_LANG_FLAG_?:
0: IDENTIFIER_MARKED (IDENTIFIER_NODEs)
NEW_EXPR_USE_GLOBAL (in NEW_EXPR).
COND_EXPR_IS_VEC_DELETE (in COND_EXPR).
DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR).
COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR).
CLEANUP_P (in TRY_BLOCK)
AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR)
PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF)
PAREN_STRING_LITERAL (in STRING_CST)
CP_DECL_THREAD_LOCAL_P (in VAR_DECL)
KOENIG_LOOKUP_P (in CALL_EXPR)
STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST).
EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT)
STMT_EXPR_NO_SCOPE (in STMT_EXPR)
BIND_EXPR_TRY_BLOCK (in BIND_EXPR)
TYPENAME_IS_ENUM_P (in TYPENAME_TYPE)
OMP_FOR_GIMPLIFYING_P (in OMP_FOR, OMP_SIMD, OMP_DISTRIBUTE,
and OMP_TASKLOOP)
BASELINK_QUALIFIED_P (in BASELINK)
TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR)
TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX)
TREE_INDIRECT_USING (in a TREE_LIST of using-directives)
ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute)
ABI_TAG_IMPLICIT (in the TREE_LIST for the argument of abi_tag)
CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR)
LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR)
DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE)
VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR)
DECL_OVERRIDE_P (in FUNCTION_DECL)
IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR)
TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR)
CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR)
OVL_ARG_DEPENDENT (in OVERLOAD)
PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION)
TINFO_HAS_ACCESS_ERRORS (in TEMPLATE_INFO)
SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR)
COMPOUND_REQ_NOEXCEPT_P (in COMPOUND_REQ)
WILDCARD_PACK_P (in WILDCARD_DECL)
BLOCK_OUTER_CURLY_BRACE_P (in BLOCK)
FOLD_EXPR_MODOP_P (*_FOLD_EXPR)
IF_STMT_CONSTEXPR_P (IF_STMT)
TEMPLATE_TYPE_PARM_FOR_CLASS (TEMPLATE_TYPE_PARM)
1: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE)
TI_PENDING_TEMPLATE_FLAG.
TEMPLATE_PARMS_FOR_INLINE.
DELETE_EXPR_USE_VEC (in DELETE_EXPR).
(TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out).
ICS_ELLIPSIS_FLAG (in _CONV)
DECL_INITIALIZED_P (in VAR_DECL)
TYPENAME_IS_CLASS_P (in TYPENAME_TYPE)
STMT_IS_FULL_EXPR_P (in _STMT)
TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR)
LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR)
DECL_FINAL_P (in FUNCTION_DECL)
QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF)
DECLTYPE_FOR_INIT_CAPTURE (in DECLTYPE_TYPE)
CONSTRUCTOR_NO_IMPLICIT_ZERO (in CONSTRUCTOR)
TINFO_USED_TEMPLATE_ID (in TEMPLATE_INFO)
PACK_EXPANSION_SIZEOF_P (in *_PACK_EXPANSION)
2: IDENTIFIER_OPNAME_P (in IDENTIFIER_NODE)
ICS_THIS_FLAG (in _CONV)
DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL)
STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST)
TYPENAME_IS_RESOLVING_P (in TYPENAME_TYPE)
TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR)
FNDECL_USED_AUTO (in FUNCTION_DECL)
DECLTYPE_FOR_LAMBDA_PROXY (in DECLTYPE_TYPE)
REF_PARENTHESIZED_P (in COMPONENT_REF, INDIRECT_REF, SCOPE_REF)
AGGR_INIT_ZERO_FIRST (in AGGR_INIT_EXPR)
CONSTRUCTOR_MUTABLE_POISON (in CONSTRUCTOR)
3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out).
ICS_BAD_FLAG (in _CONV)
FN_TRY_BLOCK_P (in TRY_BLOCK)
IDENTIFIER_CTOR_OR_DTOR_P (in IDENTIFIER_NODE)
BIND_EXPR_BODY_BLOCK (in BIND_EXPR)
DECL_NON_TRIVIALLY_INITIALIZED_P (in VAR_DECL)
CALL_EXPR_ORDERED_ARGS (in CALL_EXPR, AGGR_INIT_EXPR)
DECLTYPE_FOR_REF_CAPTURE (in DECLTYPE_TYPE)
4: TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR,
CALL_EXPR, or FIELD_DECL).
IDENTIFIER_TYPENAME_P (in IDENTIFIER_NODE)
DECL_TINFO_P (in VAR_DECL)
FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
5: C_IS_RESERVED_WORD (in IDENTIFIER_NODE)
DECL_VTABLE_OR_VTT_P (in VAR_DECL)
FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
CALL_EXPR_REVERSE_ARGS (in CALL_EXPR, AGGR_INIT_EXPR)
6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE)
DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL)
TYPE_MARKED_P (in _TYPE)
RANGE_FOR_IVDEP (in RANGE_FOR_STMT)
CALL_EXPR_OPERATOR_SYNTAX (in CALL_EXPR, AGGR_INIT_EXPR)
Usage of TYPE_LANG_FLAG_?:
0: TYPE_DEPENDENT_P
1: TYPE_HAS_USER_CONSTRUCTOR.
2: TYPE_HAS_LATE_RETURN_TYPE (in FUNCTION_TYPE, METHOD_TYPE)
TYPE_PTRMEMFUNC_FLAG (in RECORD_TYPE)
4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR
5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE)
ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE)
AUTO_IS_DECLTYPE (in TEMPLATE_TYPE_PARM)
REFERENCE_VLA_OK (in REFERENCE_TYPE)
6: TYPE_DEPENDENT_P_VALID
Usage of DECL_LANG_FLAG_?:
0: DECL_ERROR_REPORTED (in VAR_DECL).
DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL)
DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL)
DECL_MUTABLE_P (in FIELD_DECL)
DECL_DEPENDENT_P (in USING_DECL)
LABEL_DECL_BREAK (in LABEL_DECL)
1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL).
DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL)
DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL)
USING_DECL_TYPENAME_P (in USING_DECL)
DECL_VLA_CAPTURE_P (in FIELD_DECL)
DECL_ARRAY_PARAMETER_P (in PARM_DECL)
LABEL_DECL_CONTINUE (in LABEL_DECL)
2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL).
DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL)
DECL_CONSTRAINT_VAR_P (in a PARM_DECL)
TEMPLATE_DECL_COMPLEX_ALIAS_P (in TEMPLATE_DECL)
DECL_INSTANTIATING_NSDMI_P (in a FIELD_DECL)
3: DECL_IN_AGGR_P.
4: DECL_C_BIT_FIELD (in a FIELD_DECL)
DECL_ANON_UNION_VAR_P (in a VAR_DECL)
DECL_SELF_REFERENCE_P (in a TYPE_DECL)
DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL)
5: DECL_INTERFACE_KNOWN.
6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL).
DECL_FIELD_IS_BASE (in FIELD_DECL)
TYPE_DECL_ALIAS_P (in TYPE_DECL)
7: DECL_DEAD_FOR_LOCAL (in VAR_DECL).
DECL_THUNK_P (in a member FUNCTION_DECL)
DECL_NORMAL_CAPTURE_P (in FIELD_DECL)
8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL)
Usage of language-independent fields in a language-dependent manner:
TYPE_ALIAS_SET
This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so
forth as a substitute for the mark bits provided in `lang_type'.
At present, only the six low-order bits are used.
TYPE_LANG_SLOT_1
For an ENUMERAL_TYPE, this is ENUM_TEMPLATE_INFO.
For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS
BINFO_VIRTUALS
For a binfo, this is a TREE_LIST. There is an entry for each
virtual function declared either in BINFO or its direct and
indirect primary bases.
The BV_DELTA of each node gives the amount by which to adjust the
`this' pointer when calling the function. If the method is an
overridden version of a base class method, then it is assumed
that, prior to adjustment, the this pointer points to an object
of the base class.
The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable
index of the vcall offset for this entry.
The BV_FN is the declaration for the virtual function itself.
If BV_LOST_PRIMARY is set, it means that this entry is for a lost
primary virtual base and can be left null in the vtable.
BINFO_VTABLE
This is an expression with POINTER_TYPE that gives the value
to which the vptr should be initialized. Use get_vtbl_decl_for_binfo
to extract the VAR_DECL for the complete vtable.
DECL_VINDEX
This field is NULL for a non-virtual function. For a virtual
function, it is eventually set to an INTEGER_CST indicating the
index in the vtable at which this function can be found. When
a virtual function is declared, but before it is known what
function is overridden, this field is the error_mark_node.
Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is
the virtual function this one overrides, and whose TREE_CHAIN is
the old DECL_VINDEX. */
/* Language-specific tree checkers. */
/* NODE must be a VAR_DECL or a FUNCTION_DECL.  */
#define VAR_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL)
/* NODE must be a TYPE_DECL, a TEMPLATE_DECL or a FUNCTION_DECL.  */
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL)
/* Predicate form of the above: true iff NODE has one of those three
codes; never aborts.  */
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == FUNCTION_DECL)
/* NODE must be a VAR_DECL, a FUNCTION_DECL or a PARM_DECL.  */
#define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL)
/* NODE must be a VAR_DECL, FUNCTION_DECL, TYPE_DECL or TEMPLATE_DECL.  */
#define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
/* As above, but additionally allowing FIELD_DECL.  */
#define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
/* NODE must be a BOUND_TEMPLATE_TEMPLATE_PARM.  */
#define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \
TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM)
/* NODE must be a FUNCTION_DECL whose lang_specific marks it as a thunk;
in checking builds anything else aborts via tree_check_failed.  In
non-checking builds this degrades to an identity macro.  */
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define THUNK_FUNCTION_CHECK(NODE) __extension__ \
({ __typeof (NODE) const __t = (NODE); \
if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \
|| !__t->decl_common.lang_specific->u.fn.thunk_p) \
tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \
__t; })
#else
#define THUNK_FUNCTION_CHECK(NODE) (NODE)
#endif
/* Language-dependent contents of an identifier. */
struct GTY(()) lang_identifier {
/* Base: the C-family identifier data (rid code etc.).  */
struct c_common_identifier c_common;
/* Bindings of this name in namespace scope; accessed through
IDENTIFIER_NAMESPACE_BINDINGS.  */
cxx_binding *namespace_bindings;
/* The innermost binding of this name; see IDENTIFIER_BINDING.  */
cxx_binding *bindings;
/* Accessed through IDENTIFIER_TEMPLATE.  */
tree class_template_info;
/* Label associated with this name; see IDENTIFIER_LABEL_VALUE.  */
tree label_value;
};
/* Return a typed pointer version of T if it designates a
C++ front-end identifier. */
/* View T as the C++ front end's lang_identifier when it really is an
   IDENTIFIER_NODE; any other node kind yields NULL.  */
inline lang_identifier*
identifier_p (tree t)
{
  return TREE_CODE (t) == IDENTIFIER_NODE ? (lang_identifier*) t : NULL;
}
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_5 (ID)
#define LANG_IDENTIFIER_CAST(NODE) \
((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE))
struct GTY(()) template_parm_index {
struct tree_common common;
/* Position of this parameter within its parameter list, and the
nesting level of that list (orig_level being the level as originally
declared) -- NOTE(review): semantics inferred from the field names;
confirm against TEMPLATE_PARM_* accessors in cp/pt.c.  */
int index;
int level;
int orig_level;
/* The parameter's declaration -- presumably the associated
TEMPLATE_PARM decl; confirm.  */
tree decl;
};
/* A pointer-to-member constant.  */
struct GTY(()) ptrmem_cst {
struct tree_common common;
/* The member designated by the constant -- presumably a FIELD_DECL or
FUNCTION_DECL; confirm against PTRMEM_CST users.  */
tree member;
};
typedef struct ptrmem_cst * ptrmem_cst_t;
#define IDENTIFIER_GLOBAL_VALUE(NODE) \
namespace_binding ((NODE), global_namespace)
#define SET_IDENTIFIER_GLOBAL_VALUE(NODE, VAL) \
set_namespace_binding ((NODE), global_namespace, (VAL))
#define IDENTIFIER_NAMESPACE_VALUE(NODE) \
namespace_binding ((NODE), current_namespace)
#define SET_IDENTIFIER_NAMESPACE_VALUE(NODE, VAL) \
set_namespace_binding ((NODE), current_namespace, (VAL))
#define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE))
#define BIND_EXPR_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE))
/* Used to mark the block around the member initializers and cleanups. */
#define BIND_EXPR_BODY_BLOCK(NODE) \
TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE))
#define FUNCTION_NEEDS_BODY_BLOCK(NODE) \
(DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \
|| LAMBDA_FUNCTION_P (NODE))
#define STATEMENT_LIST_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE))
#define STATEMENT_LIST_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE))
/* Mark the outer curly brace BLOCK. */
#define BLOCK_OUTER_CURLY_BRACE_P(NODE) TREE_LANG_FLAG_0 (BLOCK_CHECK (NODE))
/* Nonzero if this statement should be considered a full-expression,
i.e., if temporaries created during this statement should have
their destructors run at the end of this statement. */
#define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE))
/* Marks the result of a statement expression. */
#define EXPR_STMT_STMT_EXPR_RESULT(NODE) \
TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE))
/* Nonzero if this statement-expression does not have an associated scope. */
#define STMT_EXPR_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE))
#define COND_EXPR_IS_VEC_DELETE(NODE) \
TREE_LANG_FLAG_0 (COND_EXPR_CHECK (NODE))
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual
sense of `same'. */
#define same_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_STRICT)
/* Returns nonzero iff NODE is a declaration for the global function
`main'. */
#define DECL_MAIN_P(NODE) \
(DECL_EXTERN_C_FUNCTION_P (NODE) \
&& DECL_NAME (NODE) != NULL_TREE \
&& MAIN_NAME_P (DECL_NAME (NODE)) \
&& flag_hosted)
/* The overloaded FUNCTION_DECL. */
#define OVL_FUNCTION(NODE) \
(((struct tree_overload*)OVERLOAD_CHECK (NODE))->function)
#define OVL_CHAIN(NODE) TREE_CHAIN (NODE)
/* Polymorphic access to FUNCTION and CHAIN. */
#define OVL_CURRENT(NODE) \
((TREE_CODE (NODE) == OVERLOAD) ? OVL_FUNCTION (NODE) : (NODE))
#define OVL_NEXT(NODE) \
((TREE_CODE (NODE) == OVERLOAD) ? TREE_CHAIN (NODE) : NULL_TREE)
/* If set, this was imported in a using declaration.
This is not to confuse with being used somewhere, which
is not important for this node. */
#define OVL_USED(NODE) TREE_USED (OVERLOAD_CHECK (NODE))
/* If set, this OVERLOAD was created for argument-dependent lookup
and can be freed afterward. */
#define OVL_ARG_DEPENDENT(NODE) TREE_LANG_FLAG_0 (OVERLOAD_CHECK (NODE))
struct GTY(()) tree_overload {
struct tree_common common;
/* The FUNCTION_DECL for this element of the overload set; the rest of
the set is chained through TREE_CHAIN (see OVL_FUNCTION, OVL_CHAIN,
OVL_CURRENT and OVL_NEXT).  */
tree function;
};
struct GTY(()) tree_template_decl {
struct tree_decl_common common;
/* Template parameter/argument information and the templated
declaration itself -- NOTE(review): inferred from the field names;
confirm against the TEMPLATE_DECL accessors.  */
tree arguments;
tree result;
};
/* Returns true iff NODE is a BASELINK. */
#define BASELINK_P(NODE) \
(TREE_CODE (NODE) == BASELINK)
/* The BINFO indicating the base in which lookup found the
BASELINK_FUNCTIONS. */
#define BASELINK_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo)
/* The functions referred to by the BASELINK; either a FUNCTION_DECL,
a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. */
#define BASELINK_FUNCTIONS(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->functions)
/* The BINFO in which the search for the functions indicated by this baselink
began. This base is used to determine the accessibility of functions
selected by overload resolution. */
#define BASELINK_ACCESS_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo)
/* For a type-conversion operator, the BASELINK_OPTYPE indicates the type
to which the conversion should occur. This value is important if
the BASELINK_FUNCTIONS include a template conversion operator --
the BASELINK_OPTYPE can be used to determine what type the user
requested. */
#define BASELINK_OPTYPE(NODE) \
(TREE_CHAIN (BASELINK_CHECK (NODE)))
/* Nonzero if this baselink was from a qualified lookup. */
#define BASELINK_QUALIFIED_P(NODE) \
TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE))
struct GTY(()) tree_baselink {
struct tree_common common;
/* The BINFO in which lookup found the functions (BASELINK_BINFO).  */
tree binfo;
/* A FUNCTION_DECL, TEMPLATE_DECL, OVERLOAD or TEMPLATE_ID_EXPR
(BASELINK_FUNCTIONS).  */
tree functions;
/* The BINFO where the search began; used for access checking of the
function chosen by overload resolution (BASELINK_ACCESS_BINFO).  */
tree access_binfo;
};
/* The different kinds of ids that we encounter. */
enum cp_id_kind
{
/* Not an id at all. */
CP_ID_KIND_NONE,
/* An unqualified-id that is not a template-id. */
CP_ID_KIND_UNQUALIFIED,
/* An unqualified-id that is a dependent name. */
CP_ID_KIND_UNQUALIFIED_DEPENDENT,
/* An unqualified template-id. */
CP_ID_KIND_TEMPLATE_ID,
/* A qualified-id. */
CP_ID_KIND_QUALIFIED
};
/* The various kinds of C++0x warnings we encounter. */
enum cpp0x_warn_str
{
/* extended initializer lists */
CPP0X_INITIALIZER_LISTS,
/* explicit conversion operators */
CPP0X_EXPLICIT_CONVERSION,
/* variadic templates */
CPP0X_VARIADIC_TEMPLATES,
/* lambda expressions */
CPP0X_LAMBDA_EXPR,
/* C++0x auto */
CPP0X_AUTO,
/* scoped enums */
CPP0X_SCOPED_ENUMS,
/* defaulted and deleted functions */
CPP0X_DEFAULTED_DELETED,
/* inline namespaces */
CPP0X_INLINE_NAMESPACES,
/* override controls, override/final */
CPP0X_OVERRIDE_CONTROLS,
/* non-static data member initializers */
CPP0X_NSDMI,
/* user defined literals */
CPP0X_USER_DEFINED_LITERALS,
/* delegating constructors */
CPP0X_DELEGATING_CTORS,
/* inheriting constructors */
CPP0X_INHERITING_CTORS,
/* C++11 attributes */
CPP0X_ATTRIBUTES,
/* ref-qualified member functions */
CPP0X_REF_QUALIFIER
};
/* The various kinds of operation used by composite_pointer_type. */
enum composite_pointer_operation
{
/* comparison */
CPO_COMPARISON,
/* conversion */
CPO_CONVERSION,
/* conditional expression */
CPO_CONDITIONAL_EXPR
};
/* Possible cases of expression list used by build_x_compound_expr_from_list. */
enum expr_list_kind {
ELK_INIT, /* initializer */
ELK_MEM_INIT, /* member initializer */
ELK_FUNC_CAST /* functional cast */
};
/* Possible cases of implicit bad rhs conversions. */
enum impl_conv_rhs {
ICR_DEFAULT_ARGUMENT, /* default argument */
ICR_CONVERTING, /* converting */
ICR_INIT, /* initialization */
ICR_ARGPASS, /* argument passing */
ICR_RETURN, /* return */
ICR_ASSIGN /* assignment */
};
/* Possible cases of implicit or explicit bad conversions to void. */
enum impl_conv_void {
ICV_CAST, /* (explicit) conversion to void */
ICV_SECOND_OF_COND, /* second operand of conditional expression */
ICV_THIRD_OF_COND, /* third operand of conditional expression */
ICV_RIGHT_OF_COMMA, /* right operand of comma operator */
ICV_LEFT_OF_COMMA, /* left operand of comma operator */
ICV_STATEMENT, /* statement */
ICV_THIRD_IN_FOR /* for increment expression */
};
/* Possible invalid uses of an abstract class that might not have a
specific associated declaration. */
enum GTY(()) abstract_class_use {
ACU_UNKNOWN, /* unknown or decl provided */
ACU_CAST, /* cast to abstract class */
ACU_NEW, /* new-expression of abstract class */
ACU_THROW, /* throw-expression of abstract class */
ACU_CATCH, /* catch-parameter of abstract class */
ACU_ARRAY, /* array of abstract class */
ACU_RETURN, /* return type of abstract class */
ACU_PARM /* parameter type of abstract class */
};
/* Macros for access to language-specific slots in an identifier. */
#define IDENTIFIER_NAMESPACE_BINDINGS(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->namespace_bindings)
#define IDENTIFIER_TEMPLATE(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->class_template_info)
/* The IDENTIFIER_BINDING is the innermost cxx_binding for the
identifier. It's PREVIOUS is the next outermost binding. Each
VALUE field is a DECL for the associated declaration. Thus,
name lookup consists simply of pulling off the node at the front
of the list (modulo oddities for looking up the names of types,
and such.) You can use SCOPE field to determine the scope
that bound the name. */
#define IDENTIFIER_BINDING(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->bindings)
/* TREE_TYPE only indicates on local and class scope the current
type. For namespace scope, the presence of a type in any namespace
is indicated with global_type_node, and the real type behind must
be found through lookup. */
#define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE)
#define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE)
#define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE))
#define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0)
#define IDENTIFIER_LABEL_VALUE(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->label_value)
#define SET_IDENTIFIER_LABEL_VALUE(NODE, VALUE) \
IDENTIFIER_LABEL_VALUE (NODE) = (VALUE)
/* Nonzero if this identifier is used as a virtual function name somewhere
(optimizes searches). */
#define IDENTIFIER_VIRTUAL_P(NODE) TREE_LANG_FLAG_1 (NODE)
/* Nonzero if this identifier is the prefix for a mangled C++ operator
name. */
#define IDENTIFIER_OPNAME_P(NODE) TREE_LANG_FLAG_2 (NODE)
/* Nonzero if this identifier is the name of a type-conversion
operator. */
#define IDENTIFIER_TYPENAME_P(NODE) \
TREE_LANG_FLAG_4 (NODE)
/* Nonzero if this identifier is the name of a constructor or
destructor. */
#define IDENTIFIER_CTOR_OR_DTOR_P(NODE) \
TREE_LANG_FLAG_3 (NODE)
/* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague
linkage which the prelinker has assigned to this translation
unit. */
#define IDENTIFIER_REPO_CHOSEN(NAME) \
(TREE_LANG_FLAG_6 (NAME))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly)
/* The tokens stored in the default argument. */
#define DEFARG_TOKENS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens)
#define DEFARG_INSTANTIATIONS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations)
struct GTY (()) tree_default_arg {
struct tree_common common;
/* The tokens of the deferred default argument (DEFARG_TOKENS).  */
struct cp_token_cache *tokens;
/* Accessed through DEFARG_INSTANTIATIONS -- presumably the
instantiations produced from this default argument; confirm.  */
vec<tree, va_gc> *instantiations;
};
#define DEFERRED_NOEXCEPT_PATTERN(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern)
#define DEFERRED_NOEXCEPT_ARGS(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args)
#define DEFERRED_NOEXCEPT_SPEC_P(NODE) \
((NODE) && (TREE_PURPOSE (NODE)) \
&& (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT))
#define UNEVALUATED_NOEXCEPT_SPEC_P(NODE) \
(DEFERRED_NOEXCEPT_SPEC_P (NODE) \
&& DEFERRED_NOEXCEPT_PATTERN (TREE_PURPOSE (NODE)) == NULL_TREE)
struct GTY (()) tree_deferred_noexcept {
struct tree_base base;
/* The deferred noexcept pattern (DEFERRED_NOEXCEPT_PATTERN); NULL_TREE
while the specification is still unevaluated (see
UNEVALUATED_NOEXCEPT_SPEC_P).  */
tree pattern;
/* Accessed through DEFERRED_NOEXCEPT_ARGS -- presumably the template
arguments used to instantiate the pattern; confirm.  */
tree args;
};
/* The condition associated with the static assertion. This must be
an integral constant expression. */
#define STATIC_ASSERT_CONDITION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition)
/* The message associated with the static assertion. This must be a
string constant, which will be emitted as an error message when the
static assert condition is false. */
#define STATIC_ASSERT_MESSAGE(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message)
/* Source location information for a static assertion. */
#define STATIC_ASSERT_SOURCE_LOCATION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location)
struct GTY (()) tree_static_assert {
struct tree_common common;
/* The asserted condition; must be an integral constant expression.  */
tree condition;
/* The message string constant, emitted as an error when the condition
is false.  */
tree message;
/* Where the static assertion appeared in the source.  */
location_t location;
};
struct GTY (()) tree_argument_pack_select {
struct tree_common common;
/* The pack being selected from and the index of the selected element
-- NOTE(review): inferred from the field names; confirm in cp/pt.c.  */
tree argument_pack;
int index;
};
/* The different kinds of traits that we encounter.
Each enumerator presumably corresponds to the like-named trait
intrinsic (e.g. CPTK_IS_CLASS for __is_class) -- confirm the exact
mapping in the parser/semantics code.  */
enum cp_trait_kind
{
CPTK_BASES,
CPTK_DIRECT_BASES,
CPTK_HAS_NOTHROW_ASSIGN,
CPTK_HAS_NOTHROW_CONSTRUCTOR,
CPTK_HAS_NOTHROW_COPY,
CPTK_HAS_TRIVIAL_ASSIGN,
CPTK_HAS_TRIVIAL_CONSTRUCTOR,
CPTK_HAS_TRIVIAL_COPY,
CPTK_HAS_TRIVIAL_DESTRUCTOR,
CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS,
CPTK_HAS_VIRTUAL_DESTRUCTOR,
CPTK_IS_ABSTRACT,
CPTK_IS_AGGREGATE,
CPTK_IS_BASE_OF,
CPTK_IS_CLASS,
CPTK_IS_EMPTY,
CPTK_IS_ENUM,
CPTK_IS_FINAL,
CPTK_IS_LITERAL_TYPE,
CPTK_IS_POD,
CPTK_IS_POLYMORPHIC,
CPTK_IS_SAME_AS,
CPTK_IS_STD_LAYOUT,
CPTK_IS_TRIVIAL,
CPTK_IS_TRIVIALLY_ASSIGNABLE,
CPTK_IS_TRIVIALLY_CONSTRUCTIBLE,
CPTK_IS_TRIVIALLY_COPYABLE,
CPTK_IS_UNION,
CPTK_UNDERLYING_TYPE
};
/* The types that we are processing. */
#define TRAIT_EXPR_TYPE1(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1)
#define TRAIT_EXPR_TYPE2(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2)
/* The specific trait that we are processing. */
#define TRAIT_EXPR_KIND(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind)
struct GTY (()) tree_trait_expr {
struct tree_common common;
/* The type(s) the trait is applied to (TRAIT_EXPR_TYPE1/TYPE2);
type2 is presumably unused for unary traits -- confirm.  */
tree type1;
tree type2;
/* Which trait is being evaluated (TRAIT_EXPR_KIND).  */
enum cp_trait_kind kind;
};
/* Based off of TYPE_UNNAMED_P. */
#define LAMBDA_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_LAMBDA_EXPR (NODE))
/* Test if FUNCTION_DECL is a lambda function. */
#define LAMBDA_FUNCTION_P(FNDECL) \
(DECL_OVERLOADED_OPERATOR_P (FNDECL) == CALL_EXPR \
&& LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL)))
enum cp_lambda_default_capture_mode_type {
/* No capture-default was given.  */
CPLD_NONE,
/* Capture-default by copy ([=]).  */
CPLD_COPY,
/* Capture-default by reference ([&]).  */
CPLD_REFERENCE
};
/* The method of default capture, if any. */
#define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode)
/* The capture-list, including `this'. Each capture is stored as a FIELD_DECL
* so that the name, type, and field are all together, whether or not it has
* been added to the lambda's class type.
TREE_LIST:
TREE_PURPOSE: The FIELD_DECL for this capture.
TREE_VALUE: The initializer. This is part of a GNU extension. */
#define LAMBDA_EXPR_CAPTURE_LIST(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list)
/* During parsing of the lambda-introducer, the node in the capture-list
that holds the 'this' capture. During parsing of the body, the
capture proxy for that node. */
#define LAMBDA_EXPR_THIS_CAPTURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture)
/* Predicate tracking whether `this' is in the effective capture set. */
#define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \
LAMBDA_EXPR_THIS_CAPTURE(NODE)
/* Predicate tracking whether the lambda was declared 'mutable'. */
#define LAMBDA_EXPR_MUTABLE_P(NODE) \
TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE))
/* The return type in the expression.
* NULL_TREE indicates that none was specified. */
#define LAMBDA_EXPR_RETURN_TYPE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->return_type)
/* The source location of the lambda. */
#define LAMBDA_EXPR_LOCATION(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus)
/* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL,
FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */
#define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope)
/* If EXTRA_SCOPE, this is the number of the lambda within that scope. */
#define LAMBDA_EXPR_DISCRIMINATOR(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator)
/* During parsing of the lambda, a vector of capture proxies which need
to be pushed once we're done processing a nested lambda. */
#define LAMBDA_EXPR_PENDING_PROXIES(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies)
/* The closure type of the lambda. Note that the TREE_TYPE of a
LAMBDA_EXPR is always NULL_TREE, because we need to instantiate the
LAMBDA_EXPR in order to instantiate the type. */
#define LAMBDA_EXPR_CLOSURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->closure)
struct GTY (()) tree_lambda_expr
{
struct tree_typed typed;
/* TREE_LIST of captures, including 'this'; see
LAMBDA_EXPR_CAPTURE_LIST.  */
tree capture_list;
/* The capture-list node for 'this' (or, while parsing the body, its
capture proxy); see LAMBDA_EXPR_THIS_CAPTURE.  */
tree this_capture;
/* The declared return type, or NULL_TREE if none was specified.  */
tree return_type;
/* The mangling scope: FUNCTION_DECL, PARM_DECL, VAR_DECL, FIELD_DECL
or NULL_TREE for no linkage.  */
tree extra_scope;
/* The closure class type; the LAMBDA_EXPR's own TREE_TYPE stays
NULL_TREE (see LAMBDA_EXPR_CLOSURE).  */
tree closure;
/* Capture proxies waiting to be pushed once a nested lambda has been
processed.  */
vec<tree, va_gc> *pending_proxies;
/* Source location of the lambda.  */
location_t locus;
enum cp_lambda_default_capture_mode_type default_capture_mode;
/* Number of this lambda within extra_scope.  */
int discriminator;
};
/* A (typedef,context,usage location) triplet.
It represents a typedef used through a
context at a given source location.
e.g.
struct foo {
typedef int myint;
};
struct bar {
foo::myint v; // #1<-- this location.
};
In bar, the triplet will be (myint, foo, #1).
*/
struct GTY(()) qualified_typedef_usage_s {
/* The typedef being used.  */
tree typedef_decl;
/* The context (e.g. the class) through which it is used.  */
tree context;
/* The source location of the use.  */
location_t locus;
};
typedef struct qualified_typedef_usage_s qualified_typedef_usage_t;
/* Non-zero if this template specialization has access violations that
should be rechecked when the function is instantiated outside argument
deduction. */
#define TINFO_HAS_ACCESS_ERRORS(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE)))
#define FNDECL_HAS_ACCESS_ERRORS(NODE) \
(TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE)))
/* Non-zero if this variable template specialization was specified using a
template-id, so it's a partial or full specialization and not a definition
of the member template of a particular class specialization. */
#define TINFO_USED_TEMPLATE_ID(NODE) \
(TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE)))
struct GTY(()) tree_template_info {
struct tree_common common;
/* Typedefs used through a qualifying scope whose accessibility must be
rechecked when this specialization is instantiated -- see
TINFO_HAS_ACCESS_ERRORS above the struct.  */
vec<qualified_typedef_usage_t, va_gc> *typedefs_needing_access_checking;
};
// Constraint information for a C++ declaration. Constraint information is
// comprised of:
//
// - a constraint expression introduced by the template header
// - a constraint expression introduced by a function declarator
// - the associated constraints, which are the conjunction of those,
// and used for declaration matching
//
// The template and declarator requirements are kept to support pretty
// printing constrained declarations.
struct GTY(()) tree_constraint_info {
struct tree_base base;
/* Constraint expression introduced by the template header
(CI_TEMPLATE_REQS).  */
tree template_reqs;
/* Constraint expression introduced by a function declarator's trailing
requires-clause (CI_DECLARATOR_REQS).  */
tree declarator_reqs;
/* The associated constraints: the conjunction of the two above, used
for declaration matching (CI_ASSOCIATED_CONSTRAINTS).  */
tree associated_constr;
};
// Require that pointer P is non-null before returning.
template<typename T>
inline T*
check_nonnull (T* p)
{
gcc_assert (p);
return p;
}
// Returns true iff T is non-null and represents constraint info.
// If T is a CONSTRAINT_INFO node, return it with that static type;
// for a null or differently-coded node, return NULL.
inline tree_constraint_info *
check_constraint_info (tree t)
{
  if (!t || TREE_CODE (t) != CONSTRAINT_INFO)
    return NULL;
  return (tree_constraint_info *) t;
}
// Access the expression describing the template constraints. This may be
// null if no constraints were introduced in the template parameter list,
// a requirements clause after the template parameter list, or constraints
// through a constrained-type-specifier.
#define CI_TEMPLATE_REQS(NODE) \
check_constraint_info (check_nonnull(NODE))->template_reqs
// Access the expression describing the trailing constraints. This is non-null
// for any implicit instantiation of a constrained declaration. For a
// templated declaration it is non-null only when a trailing requires-clause
// was specified.
#define CI_DECLARATOR_REQS(NODE) \
check_constraint_info (check_nonnull(NODE))->declarator_reqs
// The computed associated constraint expression for a declaration.
#define CI_ASSOCIATED_CONSTRAINTS(NODE) \
check_constraint_info (check_nonnull(NODE))->associated_constr
// Access the logical constraints on the template parameters introduced
// at a given template parameter list level indicated by NODE.
#define TEMPLATE_PARMS_CONSTRAINTS(NODE) \
TREE_TYPE (TREE_LIST_CHECK (NODE))
// Access the logical constraints on the template parameter declaration
// indicated by NODE.
#define TEMPLATE_PARM_CONSTRAINTS(NODE) \
TREE_TYPE (TREE_LIST_CHECK (NODE))
/* Non-zero if the noexcept is present in a compound requirement. */
#define COMPOUND_REQ_NOEXCEPT_P(NODE) \
TREE_LANG_FLAG_0 (TREE_CHECK (NODE, COMPOUND_REQ))
/* The constraints on an 'auto' placeholder type, used in an argument deduction
constraint. */
#define PLACEHOLDER_TYPE_CONSTRAINTS(NODE) \
DECL_SIZE_UNIT (TYPE_NAME (NODE))
/* The expression evaluated by the predicate constraint. */
#define PRED_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, PRED_CONSTR), 0)
/* The concept of a concept check. */
#define CHECK_CONSTR_CONCEPT(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 0)
/* The template arguments of a concept check. */
#define CHECK_CONSTR_ARGS(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 1)
/* The expression validated by the predicate constraint. */
#define EXPR_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, EXPR_CONSTR), 0)
/* The type validated by the predicate constraint. */
#define TYPE_CONSTR_TYPE(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, TYPE_CONSTR), 0)
/* In an implicit conversion constraint, the source expression. */
#define ICONV_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 0)
/* In an implicit conversion constraint, the target type. */
#define ICONV_CONSTR_TYPE(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 1)
/* In an argument deduction constraint, the source expression. */
#define DEDUCT_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 0)
/* In an argument deduction constraint, the target type pattern. */
#define DEDUCT_CONSTR_PATTERN(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 1)
/* In an argument deduction constraint, the list of placeholder nodes. */
#define DEDUCT_CONSTR_PLACEHOLDER(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 2)
/* The expression of an exception constraint. */
#define EXCEPT_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, EXCEPT_CONSTR), 0)
/* In a parameterized constraint, the local parameters. */
#define PARM_CONSTR_PARMS(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 0)
/* In a parameterized constraint, the operand. */
#define PARM_CONSTR_OPERAND(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 1)
/* Whether a PARM_DECL represents a local parameter in a
requires-expression. */
#define CONSTRAINT_VAR_P(NODE) \
DECL_LANG_FLAG_2 (TREE_CHECK (NODE, PARM_DECL))
/* The concept constraining this constrained template-parameter. */
#define CONSTRAINED_PARM_CONCEPT(NODE) \
DECL_SIZE_UNIT (TYPE_DECL_CHECK (NODE))
/* Any extra template arguments specified for a constrained
template-parameter. */
#define CONSTRAINED_PARM_EXTRA_ARGS(NODE) \
DECL_SIZE (TYPE_DECL_CHECK (NODE))
/* The first template parameter of CONSTRAINED_PARM_CONCEPT to be used as a
prototype for the constrained parameter in finish_shorthand_constraint,
attached for convenience. */
#define CONSTRAINED_PARM_PROTOTYPE(NODE) \
DECL_INITIAL (TYPE_DECL_CHECK (NODE))
/* Discriminators for the structure variants a C++ tree node may carry.
   Each value selects the corresponding GTY-tagged member of union
   lang_tree_node below (see the cp_tree_node_structure desc hook).  */
enum cp_tree_node_structure_enum {
  TS_CP_GENERIC,
  TS_CP_IDENTIFIER,
  TS_CP_TPI,
  TS_CP_PTRMEM,
  TS_CP_BINDING,
  TS_CP_OVERLOAD,
  TS_CP_BASELINK,
  TS_CP_TEMPLATE_DECL,
  TS_CP_WRAPPER,
  TS_CP_DEFAULT_ARG,
  TS_CP_DEFERRED_NOEXCEPT,
  TS_CP_STATIC_ASSERT,
  TS_CP_ARGUMENT_PACK_SELECT,
  TS_CP_TRAIT_EXPR,
  TS_CP_LAMBDA_EXPR,
  TS_CP_TEMPLATE_INFO,
  TS_CP_CONSTRAINT_INFO,
  TS_CP_USERDEF_LITERAL,
  LAST_TS_CP_ENUM	/* Sentinel; must remain last.  */
};
/* The resulting tree type.  A discriminated union of all the node
   structures the C++ front end uses; the GTY "desc" option calls
   cp_tree_node_structure to decide which "tag"ged member is active
   when the garbage collector walks a node.  */
union GTY((desc ("cp_tree_node_structure (&%h)"),
	   chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node {
  union tree_node GTY ((tag ("TS_CP_GENERIC"),
			desc ("tree_node_structure (&%h)"))) generic;
  struct template_parm_index GTY ((tag ("TS_CP_TPI"))) tpi;
  struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem;
  struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload;
  struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink;
  struct tree_template_decl GTY ((tag ("TS_CP_TEMPLATE_DECL"))) template_decl;
  struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg;
  struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept;
  struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier;
  struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT")))
    static_assertion;
  struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT")))
    argument_pack_select;
  struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR")))
    trait_expression;
  struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR")))
    lambda_expression;
  struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO")))
    template_info;
  struct tree_constraint_info GTY ((tag ("TS_CP_CONSTRAINT_INFO")))
    constraint_info;
  struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL")))
    userdef_literal;
};
/* Indices into the cp_global_trees array declared below.  Each entry
   has an accessor #define following the array declaration.  */
enum cp_tree_index
{
  CPTI_WCHAR_DECL,
  CPTI_VTABLE_ENTRY_TYPE,
  CPTI_DELTA_TYPE,
  CPTI_VTABLE_INDEX_TYPE,
  CPTI_CLEANUP_TYPE,
  CPTI_VTT_PARM_TYPE,
  CPTI_CLASS_TYPE,
  CPTI_UNKNOWN_TYPE,
  CPTI_INIT_LIST_TYPE,
  CPTI_VTBL_TYPE,
  CPTI_VTBL_PTR_TYPE,
  CPTI_STD,
  CPTI_ABI,
  CPTI_CONST_TYPE_INFO_TYPE,
  CPTI_TYPE_INFO_PTR_TYPE,
  CPTI_ABORT_FNDECL,
  CPTI_AGGR_TAG,
  CPTI_CTOR_IDENTIFIER,
  CPTI_COMPLETE_CTOR_IDENTIFIER,
  CPTI_BASE_CTOR_IDENTIFIER,
  CPTI_DTOR_IDENTIFIER,
  CPTI_COMPLETE_DTOR_IDENTIFIER,
  CPTI_BASE_DTOR_IDENTIFIER,
  CPTI_DELETING_DTOR_IDENTIFIER,
  CPTI_DELTA_IDENTIFIER,
  CPTI_IN_CHARGE_IDENTIFIER,
  CPTI_VTT_PARM_IDENTIFIER,
  CPTI_NELTS_IDENTIFIER,
  CPTI_THIS_IDENTIFIER,
  CPTI_PFN_IDENTIFIER,
  CPTI_VPTR_IDENTIFIER,
  CPTI_STD_IDENTIFIER,
  CPTI_AUTO_IDENTIFIER,
  CPTI_DECLTYPE_AUTO_IDENTIFIER,
  CPTI_LANG_NAME_C,
  CPTI_LANG_NAME_CPLUSPLUS,
  CPTI_EMPTY_EXCEPT_SPEC,
  CPTI_NOEXCEPT_TRUE_SPEC,
  CPTI_NOEXCEPT_FALSE_SPEC,
  CPTI_TERMINATE,
  CPTI_CALL_UNEXPECTED,
  CPTI_ATEXIT_FN_PTR_TYPE,
  CPTI_ATEXIT,
  CPTI_DSO_HANDLE,
  CPTI_DCAST,
  CPTI_KEYED_CLASSES,
  CPTI_NULLPTR,
  CPTI_NULLPTR_TYPE,
  CPTI_ALIGN_TYPE,
  CPTI_ANY_TARG,
  CPTI_MAX	/* Number of entries; sizes cp_global_trees.  */
};
/* The array of C++ front-end global tree nodes, indexed by
   cp_tree_index; GC-rooted via GTY(()).  */
extern GTY(()) tree cp_global_trees[CPTI_MAX];
#define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL]
#define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE]
/* The type used to represent an offset by which to adjust the `this'
   pointer in pointer-to-member types. */
#define delta_type_node cp_global_trees[CPTI_DELTA_TYPE]
/* The type used to represent an index into the vtable. */
#define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE]
#define class_type_node cp_global_trees[CPTI_CLASS_TYPE]
#define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE]
#define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE]
#define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE]
#define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE]
/* The `std' and `abi' namespaces.  */
#define std_node cp_global_trees[CPTI_STD]
#define abi_node cp_global_trees[CPTI_ABI]
#define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE]
#define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE]
#define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL]
#define current_aggr cp_global_trees[CPTI_AGGR_TAG]
/* The `nullptr' literal and its type, `std::nullptr_t'.  */
#define nullptr_node cp_global_trees[CPTI_NULLPTR]
#define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE]
/* std::align_val_t */
#define align_type_node cp_global_trees[CPTI_ALIGN_TYPE]
/* We cache these tree nodes so as to call get_identifier less
   frequently. */
/* The name of a constructor that takes an in-charge parameter to
   decide whether or not to construct virtual base classes. */
#define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER]
/* The name of a constructor that constructs virtual base classes. */
#define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER]
/* The name of a constructor that does not construct virtual base classes. */
#define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER]
/* The name of a destructor that takes an in-charge parameter to
   decide whether or not to destroy virtual base classes and whether
   or not to delete the object. */
#define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes. */
#define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER]
/* The name of a destructor that does not destroy virtual base
   classes. */
#define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes, and
   then deletes the entire object. */
#define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER]
#define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER]
#define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER]
/* The name of the parameter that contains a pointer to the VTT to use
   for this subobject constructor or destructor. */
#define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER]
#define nelts_identifier cp_global_trees[CPTI_NELTS_IDENTIFIER]
#define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER]
#define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER]
#define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER]
/* The name of the std namespace. */
#define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER]
/* auto and declspec(auto) identifiers. */
#define auto_identifier cp_global_trees[CPTI_AUTO_IDENTIFIER]
#define decltype_auto_identifier cp_global_trees[CPTI_DECLTYPE_AUTO_IDENTIFIER]
/* Identifiers for the "C" and "C++" language linkages (see enum
   languages below).  */
#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C]
#define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]
/* Exception specifiers used for throw(), noexcept(true) and
   noexcept(false). We rely on these being uncloned. */
#define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC]
#define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC]
#define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC]
/* The declaration for `std::terminate'. */
#define terminate_node cp_global_trees[CPTI_TERMINATE]
/* The declaration for "__cxa_call_unexpected". */
#define call_unexpected_node cp_global_trees[CPTI_CALL_UNEXPECTED]
/* The type of the function-pointer argument to "__cxa_atexit" (or
   "std::atexit", if "__cxa_atexit" is not being used). */
#define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE]
/* A pointer to `std::atexit'. */
#define atexit_node cp_global_trees[CPTI_ATEXIT]
/* A pointer to `__dso_handle'. */
#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE]
/* The declaration of the dynamic_cast runtime. */
#define dynamic_cast_node cp_global_trees[CPTI_DCAST]
/* The type of a destructor. */
#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE]
/* The type of the vtt parameter passed to subobject constructors and
   destructors. */
#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE]
/* A TREE_LIST of the dynamic classes whose vtables may have to be
   emitted in this translation unit. */
#define keyed_classes cp_global_trees[CPTI_KEYED_CLASSES]
/* A node which matches any template argument. */
#define any_targ_node cp_global_trees[CPTI_ANY_TARG]
/* Node to indicate default access. This must be distinct from the
   access nodes in tree.h. */
#define access_default_node null_node
/* Global state.  One saved_scope is live at a time (scope_chain); the
   `prev' link makes the set of saved scopes a stack.  Most fields are
   accessed through the #define accessors that follow the declaration
   of scope_chain below.  */
struct GTY(()) saved_scope {
  vec<cxx_saved_binding, va_gc> *old_bindings;
  tree old_namespace;		/* Accessed as current_namespace.  */
  vec<tree, va_gc> *decl_ns_list;	/* Accessed as decl_namespace_list.  */
  tree class_name;		/* Accessed as current_class_name.  */
  tree class_type;		/* Accessed as current_class_type.  */
  tree access_specifier;	/* Accessed as current_access_specifier.  */
  tree function_decl;
  vec<tree, va_gc> *lang_base;	/* Accessed as current_lang_base.  */
  tree lang_name;		/* Accessed as current_lang_name.  */
  tree template_parms;		/* Accessed as current_template_parms.  */
  cp_binding_level *x_previous_class_level;  /* Accessed as previous_class_level.  */
  tree x_saved_tree;
  /* Only used for uses of this in trailing return type. */
  tree x_current_class_ptr;
  tree x_current_class_ref;
  int x_processing_template_decl;	/* Accessed as processing_template_decl.  */
  int x_processing_specialization;	/* Accessed as processing_specialization.  */
  BOOL_BITFIELD x_processing_explicit_instantiation : 1;
  BOOL_BITFIELD need_pop_function_context : 1;
  /* Nonzero if we are parsing the discarded statement of a constexpr
     if-statement. */
  BOOL_BITFIELD discarded_stmt : 1;
  int unevaluated_operand;
  int inhibit_evaluation_warnings;
  int noexcept_operand;		/* Accessed as cp_noexcept_operand.  */
  /* If non-zero, implicit "omp declare target" attribute is added into the
     attribute lists. */
  int omp_declare_target_attribute;
  struct stmt_tree_s x_stmt_tree;
  cp_binding_level *class_bindings;
  cp_binding_level *bindings;
  /* Accessed as local_specializations; GTY((skip)) — not walked by GC.  */
  hash_map<tree, tree> *GTY((skip)) x_local_specializations;
  struct saved_scope *prev;	/* Enclosing saved scope.  */
};
/* The innermost saved scope; top of the saved_scope stack.  */
extern GTY(()) struct saved_scope *scope_chain;
/* The current open namespace. */
#define current_namespace scope_chain->old_namespace
/* The stack for namespaces of current declarations. */
#define decl_namespace_list scope_chain->decl_ns_list
/* IDENTIFIER_NODE: name of current class */
#define current_class_name scope_chain->class_name
/* _TYPE: the type of the current class */
#define current_class_type scope_chain->class_type
/* When parsing a class definition, the access specifier most recently
   given by the user, or, if no access specifier was given, the
   default value appropriate for the kind of class (i.e., struct,
   class, or union). */
#define current_access_specifier scope_chain->access_specifier
/* Pointer to the top of the language name stack. */
#define current_lang_base scope_chain->lang_base
#define current_lang_name scope_chain->lang_name
/* When parsing a template declaration, a TREE_LIST represents the
   active template parameters. Each node in the list represents one
   level of template parameters. The innermost level is first in the
   list. The depth of each level is stored as an INTEGER_CST in the
   TREE_PURPOSE of each node. The parameters for that level are
   stored in the TREE_VALUE. */
#define current_template_parms scope_chain->template_parms
/* Nonzero while parsing a template declaration (see also the
   processing_template_decl_sentinel RAII helper below).  */
#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation
#define in_discarded_stmt scope_chain->discarded_stmt
/* RAII sentinel: remembers the current value of
   processing_template_decl, optionally clears it (the default), and
   restores the remembered value when the sentinel goes out of scope.  */
struct processing_template_decl_sentinel
{
  int saved;

  processing_template_decl_sentinel (bool reset = true)
  {
    saved = processing_template_decl;
    if (reset)
      processing_template_decl = 0;
  }

  ~processing_template_decl_sentinel ()
  {
    processing_template_decl = saved;
  }
};
/* RAII sentinel that temporarily suppresses a warning controlled by an
   integer flag: the constructor records the flag's value and (by
   default) zeroes it; the destructor restores the recorded value.  */
struct warning_sentinel
{
  int &flag;	/* The warning flag being managed.  */
  int val;	/* Its value on entry, restored on exit.  */

  warning_sentinel (int &flag_, bool suppress = true)
    : flag (flag_), val (flag_)
  {
    if (suppress)
      flag = 0;
  }

  ~warning_sentinel () { flag = val; }
};
/* More accessors into scope_chain (the innermost saved_scope).  */

/* The cached class binding level, from the most recently exited
   class, or NULL if none. */
#define previous_class_level scope_chain->x_previous_class_level
/* A map from local variable declarations in the body of the template
   presently being instantiated to the corresponding instantiated
   local variables. */
#define local_specializations scope_chain->x_local_specializations
/* Nonzero if we are parsing the operand of a noexcept operator. */
#define cp_noexcept_operand scope_chain->noexcept_operand
/* An entry mapping an integer UID (presumably a DECL_UID — TODO(review):
   confirm against the users of extern_decl_map below) to a tree.  */
struct GTY((for_user)) cxx_int_tree_map {
  unsigned int uid;	/* Hash key.  */
  tree to;		/* Associated tree.  */
};
/* Hash traits for cxx_int_tree_map entries; hash and equal are
   implemented elsewhere in the front end.  */
struct cxx_int_tree_map_hasher : ggc_ptr_hash<cxx_int_tree_map>
{
  static hashval_t hash (cxx_int_tree_map *);
  static bool equal (cxx_int_tree_map *, cxx_int_tree_map *);
};
/* Hash traits for named-label entries (used by x_named_labels in
   language_function below); the entry type is opaque here.  */
struct named_label_entry;
struct named_label_hasher : ggc_ptr_hash<named_label_entry>
{
  static hashval_t hash (named_label_entry *);
  static bool equal (named_label_entry *, named_label_entry *);
};
/* Global state pertinent to the current function.  Reached through
   cfun->language (see cp_function_chain below); most `x_'-prefixed
   fields have accessor #defines following this declaration.  */
struct GTY(()) language_function {
  struct c_language_function base;
  tree x_cdtor_label;		/* Accessed as cdtor_label.  */
  tree x_current_class_ptr;	/* Accessed as current_class_ptr.  */
  tree x_current_class_ref;	/* Accessed as current_class_ref.  */
  tree x_eh_spec_block;		/* Accessed as current_eh_spec_block.  */
  tree x_in_charge_parm;	/* Accessed as current_in_charge_parm.  */
  tree x_vtt_parm;		/* Accessed as current_vtt_parm.  */
  tree x_return_value;		/* Accessed as current_function_return_value.  */
  tree x_auto_return_pattern;	/* Accessed as current_function_auto_return_pattern.  */
  BOOL_BITFIELD returns_value : 1;
  BOOL_BITFIELD returns_null : 1;
  BOOL_BITFIELD returns_abnormally : 1;
  BOOL_BITFIELD infinite_loop: 1;
  BOOL_BITFIELD x_in_function_try_handler : 1;
  BOOL_BITFIELD x_in_base_initializer : 1;
  /* True if this function can throw an exception. */
  BOOL_BITFIELD can_throw : 1;
  BOOL_BITFIELD invalid_constexpr : 1;
  hash_table<named_label_hasher> *x_named_labels;
  cp_binding_level *bindings;
  vec<tree, va_gc> *x_local_names;
  /* Tracking possibly infinite loops. This is a vec<tree> only because
     vec<bool> doesn't work with gtype. */
  vec<tree, va_gc> *infinite_loops;
  /* UID -> tree map of extern declarations (see cxx_int_tree_map).  */
  hash_table<cxx_int_tree_map_hasher> *extern_decl_map;
};
/* The current C++-specific per-function global variables. */
#define cp_function_chain (cfun->language)
/* In a constructor destructor, the point at which all derived class
   destroying/construction has been done. I.e., just before a
   constructor returns, or before any base class destroying will be done
   in a destructor. */
#define cdtor_label cp_function_chain->x_cdtor_label
/* When we're processing a member function, current_class_ptr is the
   PARM_DECL for the `this' pointer. The current_class_ref is an
   expression for `*this'.  Falls back to the saved_scope copies when
   there is no current function.  */
#define current_class_ptr \
  (*(cfun && cp_function_chain \
     ? &cp_function_chain->x_current_class_ptr \
     : &scope_chain->x_current_class_ptr))
#define current_class_ref \
  (*(cfun && cp_function_chain \
     ? &cp_function_chain->x_current_class_ref \
     : &scope_chain->x_current_class_ref))
/* The EH_SPEC_BLOCK for the exception-specifiers for the current
   function, if any. */
#define current_eh_spec_block cp_function_chain->x_eh_spec_block
/* The `__in_chrg' parameter for the current function. Only used for
   constructors and destructors. */
#define current_in_charge_parm cp_function_chain->x_in_charge_parm
/* The `__vtt_parm' parameter for the current function. Only used for
   constructors and destructors. */
#define current_vtt_parm cp_function_chain->x_vtt_parm
/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement that specifies a return value is seen. */
#define current_function_returns_value cp_function_chain->returns_value
/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement with no argument is seen. */
#define current_function_returns_null cp_function_chain->returns_null
/* Set to 0 at beginning of a function definition, set to 1 if
   a call to a noreturn function is seen. */
#define current_function_returns_abnormally \
  cp_function_chain->returns_abnormally
/* Set to 0 at beginning of a function definition, set to 1 if we see an
   obvious infinite loop. This can have false positives and false
   negatives, so it should only be used as a heuristic. */
#define current_function_infinite_loop cp_function_chain->infinite_loop
/* Nonzero if we are processing a base initializer. Zero elsewhere. */
#define in_base_initializer cp_function_chain->x_in_base_initializer
/* Nonzero if we are processing a function-try-block handler.  */
#define in_function_try_handler cp_function_chain->x_in_function_try_handler
/* Expression always returned from function, or error_mark_node
   otherwise, for use by the automatic named return value optimization. */
#define current_function_return_value \
  (cp_function_chain->x_return_value)
/* A type involving 'auto' to be used for return type deduction. */
#define current_function_auto_return_pattern \
  (cp_function_chain->x_auto_return_pattern)
/* True if NAME is the IDENTIFIER_NODE for an overloaded "operator
   new" or "operator delete". */
#define NEW_DELETE_OPNAME_P(NAME) \
  ((NAME) == cp_operator_id (NEW_EXPR) \
   || (NAME) == cp_operator_id (VEC_NEW_EXPR) \
   || (NAME) == cp_operator_id (DELETE_EXPR) \
   || (NAME) == cp_operator_id (VEC_DELETE_EXPR))
/* The IDENTIFIER_NODE naming the operator with tree code CODE, taken
   from the operator_name_info tables.  */
#define cp_operator_id(CODE) \
  (operator_name_info[(int) (CODE)].identifier)
#define cp_assignment_operator_id(CODE) \
  (assignment_operator_name_info[(int) (CODE)].identifier)
/* In parser.c. */
extern tree cp_literal_operator_id (const char *);
/* TRUE if a tree code represents a statement. */
extern bool statement_code_p[MAX_TREE_CODES];
#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]
/* Language linkage kinds (see lang_name_c / lang_name_cplusplus).  */
enum languages { lang_c, lang_cplusplus };
/* Macros to make error reporting functions' lives easier. */
#define TYPE_LINKAGE_IDENTIFIER(NODE) \
  (TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))
/* Nonzero if NODE has no name for linkage purposes. */
#define TYPE_UNNAMED_P(NODE) \
  (OVERLOAD_TYPE_P (NODE) && anon_aggrname_p (TYPE_LINKAGE_IDENTIFIER (NODE)))
/* The _DECL for this _TYPE. */
#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
/* Nonzero if T is a type that could resolve to any kind of concrete type
   at instantiation time. */
#define WILDCARD_TYPE_P(T) \
  (TREE_CODE (T) == TEMPLATE_TYPE_PARM \
   || TREE_CODE (T) == TYPENAME_TYPE \
   || TREE_CODE (T) == TYPEOF_TYPE \
   || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
   || TREE_CODE (T) == DECLTYPE_TYPE)
/* Nonzero if T is a class (or struct or union) type. Also nonzero
   for template type parameters, typename types, and instantiated
   template template parameters. Keep these checks in ascending code
   order. */
#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))
/* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or
   union type. */
#define SET_CLASS_TYPE_P(T, VAL) \
  (TYPE_LANG_FLAG_5 (T) = (VAL))
/* Nonzero if T is a class type. Zero for template type parameters,
   typename types, and so forth. */
#define CLASS_TYPE_P(T) \
  (RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))
/* Nonzero if T is a class type but not an union. */
#define NON_UNION_CLASS_TYPE_P(T) \
  (CLASS_TYPE_P (T) && TREE_CODE (T) != UNION_TYPE)
/* Keep these checks in ascending code order. */
#define RECORD_OR_UNION_CODE_P(T) \
  ((T) == RECORD_TYPE || (T) == UNION_TYPE)
#define OVERLOAD_TYPE_P(T) \
  (CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)
/* True if this type is dependent. This predicate is only valid if
   TYPE_DEPENDENT_P_VALID is true. */
#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)
/* True if dependent_type_p has been called for this type, with the
   result that TYPE_DEPENDENT_P is valid. */
#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE)
/* Nonzero if this type is const-qualified. */
#define CP_TYPE_CONST_P(NODE) \
  ((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)
/* Nonzero if this type is volatile-qualified. */
#define CP_TYPE_VOLATILE_P(NODE) \
  ((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)
/* Nonzero if this type is restrict-qualified. */
#define CP_TYPE_RESTRICT_P(NODE) \
  ((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)
/* Nonzero if this type is const-qualified, but not
   volatile-qualified. Other qualifiers are ignored. This macro is
   used to test whether or not it is OK to bind an rvalue to a
   reference. */
#define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \
  ((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \
   == TYPE_QUAL_CONST)
/* The parameter-type list of function NODE, past its first entry.  */
#define FUNCTION_ARG_CHAIN(NODE) \
  TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES
   which refers to a user-written parameter. */
#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
  skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Similarly, but for DECL_ARGUMENTS. */
#define FUNCTION_FIRST_USER_PARM(NODE) \
  skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))
/* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and
   ambiguity issues. */
#define DERIVED_FROM_P(PARENT, TYPE) \
  (lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_none) != NULL_TREE)
/* Gives the visibility specification for a class type. */
#define CLASSTYPE_VISIBILITY(TYPE) \
  DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE))
/* Nonzero if the visibility of TYPE was explicitly specified.  */
#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \
  DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE))
/* A generic (purpose, value) pair of trees, used e.g. for the
   vcall_indices vector in lang_type_class below.  */
struct GTY (()) tree_pair_s {
  tree purpose;
  tree value;
};
typedef tree_pair_s *tree_pair_p;
/* This is a few header flags for 'struct lang_type'. Actually,
   all but the first are used only for lang_type_class; they
   are put in this structure to save space. */
struct GTY(()) lang_type_header {
  /* Discriminator for lang_type's union: 1 when the lang_type_class
     variant is active, 0 for the lang_type_ptrmem variant.  */
  BOOL_BITFIELD is_lang_type_class : 1;
  BOOL_BITFIELD has_type_conversion : 1;	/* See TYPE_HAS_CONVERSION.  */
  BOOL_BITFIELD has_copy_ctor : 1;		/* See TYPE_HAS_COPY_CTOR.  */
  BOOL_BITFIELD has_default_ctor : 1;
  BOOL_BITFIELD const_needs_init : 1;
  BOOL_BITFIELD ref_needs_init : 1;
  BOOL_BITFIELD has_const_copy_assign : 1;	/* See TYPE_HAS_CONST_COPY_ASSIGN.  */
  BOOL_BITFIELD spare : 1;
};
/* This structure provides additional information above and beyond
   what is provide in the ordinary tree_type. In the past, we used it
   for the types of class types, template parameters types, typename
   types, and so forth. However, there can be many (tens to hundreds
   of thousands) of template parameter types in a compilation, and
   there's no need for this additional information in that case.
   Therefore, we now use this data structure only for class types.
   In the past, it was thought that there would be relatively few
   class types. However, in the presence of heavy use of templates,
   many (i.e., thousands) of classes can easily be generated.
   Therefore, we should endeavor to keep the size of this structure to
   a minimum.  Most flags below are reached through CLASSTYPE_* /
   TYPE_* accessor macros defined later in this file.  */
struct GTY(()) lang_type_class {
  struct lang_type_header h;	/* Shared header; h.is_lang_type_class is 1.  */
  unsigned char align;
  unsigned has_mutable : 1;
  unsigned com_interface : 1;
  unsigned non_pod_class : 1;
  unsigned nearly_empty_p : 1;
  unsigned user_align : 1;
  unsigned has_copy_assign : 1;		/* See TYPE_HAS_COPY_ASSIGN.  */
  unsigned has_new : 1;			/* See TYPE_HAS_NEW_OPERATOR.  */
  unsigned has_array_new : 1;		/* See TYPE_HAS_ARRAY_NEW_OPERATOR.  */
  unsigned gets_delete : 2;		/* See TYPE_GETS_DELETE.  */
  unsigned interface_only : 1;
  unsigned interface_unknown : 1;
  unsigned contains_empty_class_p : 1;
  unsigned anon_aggr : 1;
  unsigned non_zero_init : 1;
  unsigned empty_p : 1;
  unsigned vec_new_uses_cookie : 1;	/* See TYPE_VEC_NEW_USES_COOKIE.  */
  unsigned declared_class : 1;
  unsigned diamond_shaped : 1;		/* See CLASSTYPE_DIAMOND_SHAPED_P.  */
  unsigned repeated_base : 1;		/* See CLASSTYPE_REPEATED_BASE_P.  */
  unsigned being_defined : 1;		/* See TYPE_BEING_DEFINED.  */
  unsigned debug_requested : 1;
  unsigned fields_readonly : 1;
  unsigned ptrmemfunc_flag : 1;
  unsigned use_template : 2;
  unsigned was_anonymous : 1;
  /* The lazy_* flags record special members that are implied but not
     yet declared; see the CLASSTYPE_LAZY_* accessors.  */
  unsigned lazy_default_ctor : 1;
  unsigned lazy_copy_ctor : 1;
  unsigned lazy_copy_assign : 1;
  unsigned lazy_destructor : 1;
  unsigned has_const_copy_ctor : 1;	/* See TYPE_HAS_CONST_COPY_CTOR.  */
  unsigned has_complex_copy_ctor : 1;
  unsigned has_complex_copy_assign : 1;
  unsigned non_aggregate : 1;
  unsigned has_complex_dflt : 1;
  unsigned has_list_ctor : 1;		/* See TYPE_HAS_LIST_CTOR.  */
  unsigned non_std_layout : 1;
  unsigned is_literal : 1;
  unsigned lazy_move_ctor : 1;
  unsigned lazy_move_assign : 1;
  unsigned has_complex_move_ctor : 1;
  unsigned has_complex_move_assign : 1;
  unsigned has_constexpr_ctor : 1;	/* See TYPE_HAS_CONSTEXPR_CTOR.  */
  unsigned unique_obj_representations : 1;
  unsigned unique_obj_representations_set : 1;
  /* When adding a flag here, consider whether or not it ought to
     apply to a template instance if it applies to the template. If
     so, make sure to copy it in instantiate_class_template! */
  /* There are some bits left to fill out a 32-bit word. Keep track
     of this by updating the size of this bitfield whenever you add or
     remove a flag. */
  unsigned dummy : 2;
  tree primary_base;		/* See CLASSTYPE_PRIMARY_BINFO.  */
  vec<tree_pair_s, va_gc> *vcall_indices;
  tree vtables;
  tree typeinfo_var;
  vec<tree, va_gc> *vbases;	/* See CLASSTYPE_VBASECLASSES.  */
  binding_table nested_udts;	/* See CLASSTYPE_NESTED_UTDS.  */
  tree as_base;			/* See CLASSTYPE_AS_BASE.  */
  vec<tree, va_gc> *pure_virtuals;
  tree friend_classes;
  /* See CLASSTYPE_METHOD_VEC for the layout of this vector.  */
  vec<tree, va_gc> * GTY((reorder ("resort_type_method_vec"))) methods;
  tree key_method;		/* See CLASSTYPE_KEY_METHOD.  */
  tree decl_list;		/* See CLASSTYPE_DECL_LIST.  */
  tree template_info;
  tree befriending_classes;
  /* In a RECORD_TYPE, information specific to Objective-C++, such
     as a list of adopted protocols or a pointer to a corresponding
     @interface. See objc/objc-act.h for details. */
  tree objc_info;
  /* sorted_fields is sorted based on a pointer, so we need to be able
     to resort it if pointers get rearranged. */
  struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields")))
    sorted_fields;
  /* FIXME reuse another field? */
  tree lambda_expr;
};
/* The lang_type variant used for pointer-to-member types
   (h.is_lang_type_class is 0); RECORD holds the associated record.  */
struct GTY(()) lang_type_ptrmem {
  struct lang_type_header h;
  tree record;
};
/* Language-specific type data, stored in TYPE_LANG_SPECIFIC.  A union
   of the class and pointer-to-member variants, discriminated for the
   garbage collector by h.is_lang_type_class (see the GTY desc).  */
struct GTY(()) lang_type {
  union lang_type_u
  {
    struct lang_type_header GTY((skip (""))) h;
    struct lang_type_class GTY((tag ("1"))) c;
    struct lang_type_ptrmem GTY((tag ("0"))) ptrmem;
  } GTY((desc ("%h.h.is_lang_type_class"))) u;
};
/* Checked accessors for the two variants of TYPE_LANG_SPECIFIC.  With
   tree checking enabled they verify, via the is_lang_type_class
   discriminator, that the requested union member is the active one,
   aborting through lang_check_failed otherwise; the statement
   expression (a GCC extension) yields a pointer to the member.
   NOTE: the result expressions had been corrupted to "<->u.c;" /
   "<->u.ptrmem;" by an HTML-entity mangling of "&lt"; restored to
   take the address of the local `lt's member.  */
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define LANG_TYPE_CLASS_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
   if (! lt->u.h.is_lang_type_class) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.c; })
#define LANG_TYPE_PTRMEM_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
   if (lt->u.h.is_lang_type_class) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.ptrmem; })
#else
#define LANG_TYPE_CLASS_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.c)
#define LANG_TYPE_PTRMEM_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.ptrmem)
#endif /* ENABLE_TREE_CHECKING */
/* Nonzero for _CLASSTYPE means that operator delete is defined. */
#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
/* Bit 0 of TYPE_GETS_DELETE; presumably the non-array operator delete
   (bit 1 being the array form) — TODO(review): confirm.  */
#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)
/* Nonzero if `new NODE[x]' should cause the allocation of extra
   storage to indicate how many array elements are in use. */
#define TYPE_VEC_NEW_USES_COOKIE(NODE) \
  (CLASS_TYPE_P (NODE) \
   && LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)
/* Nonzero means that this _CLASSTYPE node defines ways of converting
   itself to other types. */
#define TYPE_HAS_CONVERSION(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->h.has_type_conversion)
/* Nonzero means that NODE (a class type) has a default constructor --
   but that it has not yet been declared. */
#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)
/* Nonzero means that NODE (a class type) has a copy constructor --
   but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)
/* Nonzero means that NODE (a class type) has a move constructor --
   but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor)
/* Nonzero means that NODE (a class type) has an assignment operator
   -- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign)
/* Nonzero means that NODE (a class type) has an assignment operator
   -- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign)
/* Nonzero means that NODE (a class type) has a destructor -- but that
   it has not yet been declared. */
#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)
/* Nonzero means that NODE (a class type) is final */
#define CLASSTYPE_FINAL(NODE) \
  TYPE_FINAL_P (NODE)
/* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */
#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign)
/* True iff the class type NODE has an "operator =" whose parameter
   has a parameter of type "const X&". */
#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->h.has_const_copy_assign)
/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */
#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->h.has_copy_ctor)
#define TYPE_HAS_CONST_COPY_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor)
/* Nonzero if this class has an X(initializer_list<T>) constructor. */
#define TYPE_HAS_LIST_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor)
/* Nonzero if this class has a constexpr constructor other than a copy/move
   constructor. Note that a class can have constexpr constructors for
   static initialization even if it isn't a literal class. */
#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)
/* Nonzero if this class defines an overloaded operator new. (An
   operator new [] doesn't count.) */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_new)
/* Nonzero if this class defines an overloaded operator new[]. */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)
/* Nonzero means that this type is being defined. I.e., the left brace
   starting the definition of this type has been seen. */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)
/* Nonzero means that this type is either complete or being defined, so we
   can do lookup in it. */
#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
  (COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))
/* Mark bits for repeated base checks. */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))
/* Nonzero if the class NODE has multiple paths to the same (virtual)
   base object. */
#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
  (LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped)
/* Nonzero if the class NODE has multiple instances of the same base
   type. */
#define CLASSTYPE_REPEATED_BASE_P(NODE) \
  (LANG_TYPE_CLASS_CHECK(NODE)->repeated_base)
/* The member function with which the vtable will be emitted:
   the first noninline non-pure-virtual member function. NULL_TREE
   if there is no key function or if this is a class template */
#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)
/* Vector member functions defined in this class. Each element is
   either a FUNCTION_DECL, a TEMPLATE_DECL, or an OVERLOAD. All
   functions with the same name end up in the same slot. The first
   two elements are for constructors, and destructors, respectively.
   All template conversion operators to innermost template dependent
   types are overloaded on the next slot, if they exist. Note, the
   names for these functions will not all be the same. The
   non-template conversion operators & templated conversions to
   non-innermost template types are next, followed by ordinary member
   functions. There may be empty entries at the end of the vector.
   The conversion operators are unsorted. The ordinary member
   functions are sorted, once the class is complete. */
#define CLASSTYPE_METHOD_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->methods)
/* For class templates, this is a TREE_LIST of all member data,
   functions, types, and friends in the order of declaration.
   The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
   and the RECORD_TYPE for the class template otherwise. */
#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)
/* The slot in the CLASSTYPE_METHOD_VEC where constructors go. */
#define CLASSTYPE_CONSTRUCTOR_SLOT 0
/* The slot in the CLASSTYPE_METHOD_VEC where destructors go. */
#define CLASSTYPE_DESTRUCTOR_SLOT 1
/* The first slot in the CLASSTYPE_METHOD_VEC where conversion
   operators can appear. */
#define CLASSTYPE_FIRST_CONVERSION_SLOT 2
/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These
   are the constructors that take an in-charge parameter. */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
  ((*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_CONSTRUCTOR_SLOT])
/* A FUNCTION_DECL for the destructor for NODE. These are the
   destructors that take an in-charge parameter. If
   CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
   until the destructor is created with lazily_declare_fn. */
#define CLASSTYPE_DESTRUCTORS(NODE) \
  (CLASSTYPE_METHOD_VEC (NODE) \
   ? (*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_DESTRUCTOR_SLOT] \
   : NULL_TREE)
/* A dictionary of the nested user-defined-types (class-types, or enums)
   found within this class. This table includes nested member class
   templates. */
#define CLASSTYPE_NESTED_UTDS(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->nested_udts)
/* Nonzero if NODE has a primary base class, i.e., a base class with
   which it shares the virtual function table pointer. */
#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
  (CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)
/* If non-NULL, this is the binfo for the primary base class, i.e.,
   the base class which contains the virtual function table pointer
   for this class. */
#define CLASSTYPE_PRIMARY_BINFO(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->primary_base)
/* A vector of BINFOs for the direct and indirect virtual base classes
   that this type uses in a post-order depth-first left-to-right
   order. (In other words, these bases appear in the order that they
   should be initialized.) */
#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)
/* The type corresponding to NODE when NODE is used as a base class,
   i.e., NODE without virtual base classes or tail padding. */
#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)
/* True iff NODE is the CLASSTYPE_AS_BASE version of some type. */
#define IS_FAKE_BASE_TYPE(NODE) \
  (TREE_CODE (NODE) == RECORD_TYPE \
   && TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \
   && CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))
/* These are the size and alignment of the type without its virtual
   base classes, for when we use this type as a base itself. */
#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))
/* The alignment of NODE, without its virtual bases, in bytes. */
#define CLASSTYPE_ALIGN_UNIT(NODE) \
(CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)
/* A vec<tree> of virtual functions which cannot be inherited by
derived classes. When deriving from this type, the derived
class must provide its own definition for each of these functions. */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)
/* Nonzero means that this type is an abstract class type. */
#define ABSTRACT_CLASS_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS(NODE))
/* Nonzero means that this type has an X() constructor. */
#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_default_ctor)
/* Nonzero means that this type contains a mutable member. */
#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))
/* Nonzero means that this class type is not POD for the purpose of layout
(as defined in the ABI). This is different from the language's POD. */
#define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)
/* Nonzero means that this class type is a non-standard-layout class. */
#define CLASSTYPE_NON_STD_LAYOUT(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout)
/* Nonzero means that this class type does have unique object
representations. */
#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations)
/* Nonzero means that this class type has
CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS computed. */
#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS_SET(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations_set)
/* Nonzero means that this class contains pod types whose default
initialization is not a zero initialization (namely, pointers to
data members). */
#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init)
/* Nonzero if this class is "empty" in the sense of the C++ ABI. */
#define CLASSTYPE_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->empty_p)
/* Nonzero if this class is "nearly empty", i.e., contains only a
virtual function table pointer. */
#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p)
/* Nonzero if this class contains an empty subobject. */
#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p)
/* A list of class types of which this type is a friend. The
TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the
case of a template friend. */
#define CLASSTYPE_FRIEND_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->friend_classes)
/* A list of the classes which grant friendship to this class. */
#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes)
/* The associated LAMBDA_EXPR that made this class. */
#define CLASSTYPE_LAMBDA_EXPR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr)
/* The extra mangling scope for this closure type. */
#define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \
(LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE)))
/* Say whether this node was declared as a "class" or a "struct". */
#define CLASSTYPE_DECLARED_CLASS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->declared_class)
/* Nonzero if this class has const members
which have no specified initialization. */
#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init : 0)
#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init = (VALUE))
/* Nonzero if this class has ref members
which have no specified initialization. */
#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init : 0)
#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init = (VALUE))
/* Nonzero if this class is included from a header file which employs
`#pragma interface', and it is not included in its implementation file. */
#define CLASSTYPE_INTERFACE_ONLY(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_only)
/* True if we have already determined whether or not vtables, VTTs,
typeinfo, and other similar per-class data should be emitted in
this translation unit. This flag does not indicate whether or not
these items should be emitted; it only indicates that we know one
way or the other. */
#define CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0)
/* The opposite of CLASSTYPE_INTERFACE_KNOWN. */
#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown)
#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1)
#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0)
/* Nonzero if a _DECL node requires us to output debug info for this class. */
#define CLASSTYPE_DEBUG_REQUESTED(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->debug_requested)
/* Additional macros for inheritance information. */
/* Nonzero means that this class is on a path leading to a new vtable. */
#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE)
/* Nonzero means B (a BINFO) has its own vtable. Any copies will not
have this flag set. */
#define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B))
/* Compare a BINFO_TYPE with another type for equality. For a binfo,
this is functionally equivalent to using same_type_p, but
measurably faster. At least one of the arguments must be a
BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If
BINFO_TYPE(T) ever stops being the main variant of the class the
binfo is for, this macro must change. */
#define SAME_BINFO_TYPE_P(A, B) ((A) == (B))
/* Any subobject that needs a new vtable must have a vptr and must not
be a non-virtual primary base (since it would then use the vtable from a
derived class and never become non-primary.) */
#define SET_BINFO_NEW_VTABLE_MARKED(B) \
(BINFO_NEW_VTABLE_MARKED (B) = 1, \
gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \
gcc_assert (TYPE_VFIELD (BINFO_TYPE (B))))
/* Nonzero if this binfo is for a dependent base - one that should not
be searched. */
#define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE)
/* Nonzero if this binfo has lost its primary base binfo (because that
is a nearly-empty virtual base that has been taken by some other
base in the complete hierarchy. */
#define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE)
/* Nonzero if this BINFO is a primary base class. */
#define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE)
/* Used by various search routines. */
#define IDENTIFIER_MARKED(NODE) TREE_LANG_FLAG_0 (NODE)
/* A vec<tree_pair_s> of the vcall indices associated with the class
NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual
function. The VALUE is the index into the virtual table where the
vcall offset for that function is stored, when NODE is a virtual
base. */
#define CLASSTYPE_VCALL_INDICES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices)
/* The various vtables for the class NODE. The primary vtable will be
first, followed by the construction vtables and VTT, if any. */
#define CLASSTYPE_VTABLES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vtables)
/* The std::type_info variable representing this class, or NULL if no
such variable has been created. This field is only set for the
TYPE_MAIN_VARIANT of the class. */
#define CLASSTYPE_TYPEINFO_VAR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)
/* Accessor macros for the BINFO_VIRTUALS list. */
/* The number of bytes by which to adjust the `this' pointer when
calling this virtual function. Subtract this value from the this
pointer. Always non-NULL, might be constant zero though. */
#define BV_DELTA(NODE) (TREE_PURPOSE (NODE))
/* If non-NULL, the vtable index at which to find the vcall offset
when calling this virtual function. Add the value at that vtable
index to the this pointer. */
#define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE))
/* The function to call. */
#define BV_FN(NODE) (TREE_VALUE (NODE))
/* Whether or not this entry is for a lost primary virtual base. */
#define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE
will be NULL_TREE to indicate a throw specification of `()', or
no exceptions allowed. For a noexcept specification, TREE_VALUE
is NULL_TREE and TREE_PURPOSE is the constant-expression. For
a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT
(for templates) or an OVERLOAD list of functions (for implicitly
declared functions). */
#define TYPE_RAISES_EXCEPTIONS(NODE) \
TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()'
or noexcept(true). */
#define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. This is the
case for things declared noexcept(true) and, with -fnothrow-opt, for
throw() functions. */
#define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE)
/* The binding level associated with the namespace. */
#define NAMESPACE_LEVEL(NODE) \
(LANG_DECL_NS_CHECK (NODE)->level)
/* Flags shared by all forms of DECL_LANG_SPECIFIC.
   Some of the flags live here only to make lang_decl_min/fn smaller. Do
   not make this struct larger than 32 bits; instead, make sel smaller. */
struct GTY(()) lang_decl_base {
  unsigned selector : 16;   /* Larger than necessary for faster access. */
  ENUM_BITFIELD(languages) language : 1;
  unsigned use_template : 2;
  unsigned not_really_extern : 1; /* var or fn */
  unsigned initialized_in_class : 1; /* var or fn */
  unsigned repo_available_p : 1; /* var or fn */
  unsigned threadprivate_or_deleted_p : 1; /* var or fn */
  unsigned anticipated_p : 1; /* fn, type or template */
  /* anticipated_p reused as DECL_OMP_PRIVATIZED_MEMBER in var */
  unsigned friend_or_tls : 1; /* var, fn, type or template */
  unsigned template_conv_p : 1; /* var or template */
  unsigned odr_used : 1; /* var or fn */
  /* Discriminates lang_decl_min::u2 (see its GTY desc):
     0 selects ACCESS, 1 selects DISCRIMINATOR. */
  unsigned u2sel : 1;
  unsigned concept_p : 1; /* applies to vars and functions */
  unsigned var_declared_inline_p : 1; /* var */
  unsigned decomposition_p : 1; /* var */
  /* 1 spare bit */
};
/* True for DECL codes which have template info and access. */
#define LANG_DECL_HAS_MIN(NODE) \
  (VAR_OR_FUNCTION_DECL_P (NODE) \
   || TREE_CODE (NODE) == FIELD_DECL \
   || TREE_CODE (NODE) == CONST_DECL \
   || TREE_CODE (NODE) == TYPE_DECL \
   || TREE_CODE (NODE) == TEMPLATE_DECL \
   || TREE_CODE (NODE) == USING_DECL)
/* DECL_LANG_SPECIFIC for the above codes. */
struct GTY(()) lang_decl_min {
  struct lang_decl_base base;
  /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
     THUNK_ALIAS.
     In a FUNCTION_DECL for which DECL_THUNK_P does not hold,
     VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
     DECL_TEMPLATE_INFO. */
  tree template_info;
  /* Discriminated by base.u2sel, per the GTY desc below. */
  union lang_decl_u2 {
    /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
       THUNK_VIRTUAL_OFFSET.
       Otherwise this is DECL_ACCESS. */
    tree GTY ((tag ("0"))) access;
    /* For VAR_DECL in function, this is DECL_DISCRIMINATOR. */
    int GTY ((tag ("1"))) discriminator;
  } GTY ((desc ("%0.u.base.u2sel"))) u2;
};
/* Additional DECL_LANG_SPECIFIC information for functions. */
struct GTY(()) lang_decl_fn {
  struct lang_decl_min min;
  /* In an overloaded operator, this is the value of
     DECL_OVERLOADED_OPERATOR_P. */
  ENUM_BITFIELD (tree_code) operator_code : 16;
  unsigned global_ctor_p : 1;
  unsigned global_dtor_p : 1;
  unsigned assignment_operator_p : 1;
  unsigned static_function : 1;
  unsigned pure_virtual : 1;
  unsigned defaulted_p : 1;
  unsigned has_in_charge_parm_p : 1;
  unsigned has_vtt_parm_p : 1;
  unsigned pending_inline_p : 1;
  unsigned nonconverting : 1;
  unsigned thunk_p : 1;
  unsigned this_thunk_p : 1;
  unsigned hidden_friend_p : 1;
  unsigned omp_declare_reduction_p : 1;
  /* 2 spare bits on 32-bit hosts, 34 on 64-bit hosts. */
  /* For a non-thunk function decl, this is a tree list of
     friendly classes. For a thunk function decl, it is the
     thunked to function decl. */
  tree befriending_classes;
  /* For a non-virtual FUNCTION_DECL, this is
     DECL_FRIEND_CONTEXT. For a virtual FUNCTION_DECL for which
     DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both
     this pointer and result pointer adjusting thunks are
     chained here. This pointer thunks to return pointer thunks
     will be chained on the return pointer thunk. */
  tree context;
  /* Discriminated by the thunk_p bit above, per the GTY desc below. */
  union lang_decl_u5
  {
    /* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is
       DECL_CLONED_FUNCTION. */
    tree GTY ((tag ("0"))) cloned_function;
    /* In a FUNCTION_DECL for which THUNK_P holds this is the
       THUNK_FIXED_OFFSET. */
    HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
  } GTY ((desc ("%1.thunk_p"))) u5;
  /* Discriminated by the pending_inline_p bit above, per the GTY desc. */
  union lang_decl_u3
  {
    struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info;
    struct language_function * GTY ((tag ("0")))
      saved_language_function;
  } GTY ((desc ("%1.pending_inline_p"))) u;
};
/* DECL_LANG_SPECIFIC for namespaces. */
struct GTY(()) lang_decl_ns {
  struct lang_decl_base base;
  /* The binding level of the namespace; accessed via NAMESPACE_LEVEL. */
  cp_binding_level *level;
  /* NOTE(review): semantics not evident in this chunk — presumably the
     using-directives of, and users of, this namespace; confirm at the
     accessor/maintenance sites. */
  tree ns_using;
  tree ns_users;
};
/* DECL_LANG_SPECIFIC for parameters. */
struct GTY(()) lang_decl_parm {
  struct lang_decl_base base;
  /* Nesting level of the declarator, starting at 1; see DECL_PARM_LEVEL. */
  int level;
  /* Index of a user-declared parameter in its function, starting at 1;
     artificial parameters have index 0. See DECL_PARM_INDEX. */
  int index;
};
/* DECL_LANG_SPECIFIC for all types. It would be nice to just make this a
   union rather than a struct containing a union as its only field, but
   tree.h declares it as a struct. */
struct GTY(()) lang_decl {
  /* Discriminated by base.selector: 0 = min, 1 = fn, 2 = ns, 3 = parm,
     per the GTY tags below. */
  union GTY((desc ("%h.base.selector"))) lang_decl_u {
    struct lang_decl_base GTY ((default)) base;
    struct lang_decl_min GTY((tag ("0"))) min;
    struct lang_decl_fn GTY ((tag ("1"))) fn;
    struct lang_decl_ns GTY((tag ("2"))) ns;
    struct lang_decl_parm GTY((tag ("3"))) parm;
  } u;
};
/* Looks through a template (if present) to find what it declares:
   for a TEMPLATE_DECL, its DECL_TEMPLATE_RESULT; otherwise NODE itself. */
#define STRIP_TEMPLATE(NODE) \
  (TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE)
/* Checked accessors for the lang_decl union members. With tree checking
   enabled these use GNU statement-expressions to verify that NODE carries
   the expected flavor of DECL_LANG_SPECIFIC (calling lang_check_failed on
   a mismatch) before yielding the address of the union member; without
   checking they compile down to plain field accesses.
   (The "<->u.xxx" text previously here was a mis-decoded "&lt" HTML
   entity; the address-of through the local LT pointer is restored.) */
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define LANG_DECL_MIN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
   if (!LANG_DECL_HAS_MIN (NODE)) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.min; })
/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function
   template, not just on a FUNCTION_DECL. So when looking for things in
   lang_decl_fn, look down through a TEMPLATE_DECL into its result. */
#define LANG_DECL_FN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE)); \
   if (!DECL_DECLARES_FUNCTION_P (NODE) || lt->u.base.selector != 1) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.fn; })
#define LANG_DECL_NS_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
   if (TREE_CODE (NODE) != NAMESPACE_DECL || lt->u.base.selector != 2) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.ns; })
#define LANG_DECL_PARM_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
   if (TREE_CODE (NODE) != PARM_DECL) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.parm; })
#define LANG_DECL_U2_CHECK(NODE, TF) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
   if (!LANG_DECL_HAS_MIN (NODE) || lt->u.base.u2sel != TF) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.min.u2; })
#else
/* Unchecked variants: plain field selections. */
#define LANG_DECL_MIN_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.min)
#define LANG_DECL_FN_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn)
#define LANG_DECL_NS_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.ns)
#define LANG_DECL_PARM_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.parm)
#define LANG_DECL_U2_CHECK(NODE, TF) \
  (&DECL_LANG_SPECIFIC (NODE)->u.min.u2)
#endif /* ENABLE_TREE_CHECKING */
/* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the
declaration. Some entities (like a member function in a local
class, or a local variable) do not have linkage at all, and this
macro should not be used in those cases.
Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was
created by language-independent code, and has C linkage. Most
VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but
we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */
#define DECL_LANGUAGE(NODE) \
(DECL_LANG_SPECIFIC (NODE) \
? DECL_LANG_SPECIFIC (NODE)->u.base.language \
: (TREE_CODE (NODE) == FUNCTION_DECL \
? lang_c : lang_cplusplus))
/* Set the language linkage for NODE to LANGUAGE. */
#define SET_DECL_LANGUAGE(NODE, LANGUAGE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE))
/* For FUNCTION_DECLs and TEMPLATE_DECLs: nonzero means that this function
is a constructor. */
#define DECL_CONSTRUCTOR_P(NODE) \
DECL_CXX_CONSTRUCTOR_P (STRIP_TEMPLATE (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
object. */
#define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == complete_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
object. */
#define DECL_BASE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == base_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
specialized in-charge constructor or the specialized not-in-charge
constructor. */
#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \
&& !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */
#define DECL_COPY_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)
/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */
#define DECL_MOVE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE))
/* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL)
is a destructor. */
#define DECL_DESTRUCTOR_P(NODE) \
DECL_CXX_DESTRUCTOR_P (STRIP_TEMPLATE (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
specialized in-charge constructor, in-charge deleting constructor,
or the base destructor. */
#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_DESTRUCTOR_P (NODE) \
&& !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object. */
#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == complete_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
object. */
#define DECL_BASE_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == base_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object that deletes the object after it has been destroyed. */
#define DECL_DELETING_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == deleting_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or
destructor. */
#define DECL_CLONED_FUNCTION_P(NODE) (!!decl_cloned_function_p (NODE, true))
/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
cloned. */
#define DECL_CLONED_FUNCTION(NODE) (*decl_cloned_function_p (NODE, false))
/* Perform an action for each clone of FN, if FN is a function with
clones. This macro should be used like:
FOR_EACH_CLONE (clone, fn)
{ ... }
*/
#define FOR_EACH_CLONE(CLONE, FN) \
if (!(TREE_CODE (FN) == FUNCTION_DECL \
&& (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN) \
|| DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))))\
; \
else \
for (CLONE = DECL_CHAIN (FN); \
CLONE && DECL_CLONED_FUNCTION_P (CLONE); \
CLONE = DECL_CHAIN (CLONE))
/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */
#define DECL_DISCRIMINATOR_P(NODE) \
(VAR_P (NODE) && DECL_FUNCTION_SCOPE_P (NODE))
/* Discriminator for name mangling. */
#define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator)
/* True iff DECL_DISCRIMINATOR is set for a DECL_DISCRIMINATOR_P decl. */
#define DECL_DISCRIMINATOR_SET_P(NODE) \
(DECL_LANG_SPECIFIC (NODE) && DECL_LANG_SPECIFIC (NODE)->u.base.u2sel == 1)
/* The index of a user-declared parameter in its function, starting at 1.
All artificial parameters will have index 0. */
#define DECL_PARM_INDEX(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->index)
/* The level of a user-declared parameter in its function, starting at 1.
A parameter of the function will have level 1; a parameter of the first
nested function declarator (i.e. t in void f (void (*p)(T t))) will have
level 2. */
#define DECL_PARM_LEVEL(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->level)
/* Nonzero if the VTT parm has been added to NODE. */
#define DECL_HAS_VTT_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p)
/* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is
required. */
#define DECL_NEEDS_VTT_PARM_P(NODE) \
(CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \
&& (DECL_BASE_CONSTRUCTOR_P (NODE) \
|| DECL_BASE_DESTRUCTOR_P (NODE)))
/* Nonzero if NODE is a user-defined conversion operator. */
#define DECL_CONV_FN_P(NODE) \
(DECL_NAME (NODE) && IDENTIFIER_TYPENAME_P (DECL_NAME (NODE)))
/* If FN is a conversion operator, the type to which it converts.
Otherwise, NULL_TREE. */
#define DECL_CONV_FN_TYPE(FN) \
(DECL_CONV_FN_P (FN) ? TREE_TYPE (DECL_NAME (FN)) : NULL_TREE)
/* Nonzero if NODE, which is a TEMPLATE_DECL, is a template
conversion operator to a type dependent on the innermost template
args. */
#define DECL_TEMPLATE_CONV_FN_P(NODE) \
(DECL_LANG_SPECIFIC (TEMPLATE_DECL_CHECK (NODE))->u.base.template_conv_p)
/* Nonzero if NODE, a static data member, was declared in its class as an
array of unknown bound. */
#define VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.base.template_conv_p \
: false)
#define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.template_conv_p = true)
/* Set the overloaded operator code for NODE to CODE. */
#define SET_OVERLOADED_OPERATOR_CODE(NODE, CODE) \
(LANG_DECL_FN_CHECK (NODE)->operator_code = (CODE))
/* If NODE is an overloaded operator, then this returns the TREE_CODE
associated with the overloaded operator.
DECL_ASSIGNMENT_OPERATOR_P must also be checked to determine
whether or not NODE is an assignment operator. If NODE is not an
overloaded operator, ERROR_MARK is returned. Since the numerical
value of ERROR_MARK is zero, this macro can be used as a predicate
to test whether or not NODE is an overloaded operator. */
#define DECL_OVERLOADED_OPERATOR_P(NODE) \
(IDENTIFIER_OPNAME_P (DECL_NAME (NODE)) \
? LANG_DECL_FN_CHECK (NODE)->operator_code : ERROR_MARK)
/* Nonzero if NODE is an assignment operator (including += and such). */
#define DECL_ASSIGNMENT_OPERATOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->assignment_operator_p)
/* For FUNCTION_DECLs: nonzero means that this function is a
constructor or a destructor with an extra in-charge parameter to
control whether or not virtual bases are constructed. */
#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p)
/* Nonzero if DECL is a declaration of __builtin_constant_p. */
#define DECL_IS_BUILTIN_CONSTANT_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \
&& DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P)
/* Nonzero for _DECL means that this decl appears in (or will appear
in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for
detecting circularity in case members are multiply defined. In the
case of a VAR_DECL, it is also used to determine how program storage
should be allocated. */
#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE))
/* Nonzero for a VAR_DECL means that the variable's initialization (if
any) has been processed. (In general, DECL_INITIALIZED_P is
!DECL_EXTERNAL, but static data members may be initialized even if
not defined.) */
#define DECL_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL iff an explicit initializer was provided
or a non-trivial constructor is called. */
#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL that was initialized with a
constant-expression. */
#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \
(TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE)))
/* Nonzero if the DECL was initialized in the class definition itself,
rather than outside the class. This is used for both static member
VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */
#define DECL_INITIALIZED_IN_CLASS_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.initialized_in_class)
/* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr].
Only available for decls with DECL_LANG_SPECIFIC. */
#define DECL_ODR_USED(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.odr_used)
/* Nonzero for DECL means that this decl is just a friend declaration,
and should not be added to the list of members for this class. */
#define DECL_FRIEND_P(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.friend_or_tls)
/* Nonzero if the thread-local variable was declared with __thread as
opposed to thread_local. */
#define DECL_GNU_TLS_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
&& DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls)
#define SET_DECL_GNU_TLS_P(NODE) \
(retrofit_lang_decl (VAR_DECL_CHECK (NODE)), \
DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls = true)
/* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */
#define DECL_BEFRIENDING_CLASSES(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* Nonzero for FUNCTION_DECL means that this decl is a static
member function. */
#define DECL_STATIC_FUNCTION_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->static_function)
/* Nonzero for FUNCTION_DECL means that this decl is a non-static
member function. */
#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \
(TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)
/* Nonzero for FUNCTION_DECL means that this decl is a member function
(static or non-static). */
#define DECL_FUNCTION_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as const X *const. */
#define DECL_CONST_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as volatile X *const. */
#define DECL_VOLATILE_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for a DECL means that this member is a non-static member. */
#define DECL_NONSTATIC_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL)
/* Nonzero for _DECL means that this member object type
is mutable. */
#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE))
/* Nonzero for _DECL means that this constructor or conversion function is
non-converting. */
#define DECL_NONCONVERTING_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->nonconverting)
/* Nonzero for FUNCTION_DECL means that this member function is a pure
virtual function. */
#define DECL_PURE_VIRTUAL_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pure_virtual)
/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an
invalid overrider for a function from a base class. Once we have
complained about an invalid overrider we avoid complaining about it
again. */
#define DECL_INVALID_OVERRIDER_P(NODE) \
(DECL_LANG_FLAG_4 (NODE))
/* True (in a FUNCTION_DECL) if NODE is a function declared with
an override virt-specifier */
#define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE))
/* The thunks associated with NODE, a FUNCTION_DECL. */
#define DECL_THUNKS(NODE) \
(DECL_VIRTUAL_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* Set DECL_THUNKS. */
#define SET_DECL_THUNKS(NODE,THUNKS) \
(LANG_DECL_FN_CHECK (NODE)->context = (THUNKS))
/* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this
is the constructor it inherits from. */
#define DECL_INHERITED_CTOR(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \
? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* And this is the base that constructor comes from. */
#define DECL_INHERITED_CTOR_BASE(NODE) \
(DECL_INHERITED_CTOR (NODE) \
? DECL_CONTEXT (flag_new_inheriting_ctors \
? strip_inheriting_ctors (NODE) \
: DECL_INHERITED_CTOR (NODE)) \
: NULL_TREE)
/* Set the inherited base. */
#define SET_DECL_INHERITED_CTOR(NODE,INH) \
(LANG_DECL_FN_CHECK (NODE)->context = (INH))
/* Nonzero if NODE is a thunk, rather than an ordinary function. */
#define DECL_THUNK_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_LANG_SPECIFIC (NODE) \
&& LANG_DECL_FN_CHECK (NODE)->thunk_p)
/* Set DECL_THUNK_P for node. */
#define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \
(LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \
LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING))
/* Nonzero if NODE is a this pointer adjusting thunk. */
#define DECL_THIS_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a result pointer adjusting thunk. */
#define DECL_RESULT_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */
#define DECL_NON_THUNK_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE))
/* Nonzero if NODE is `extern "C"'. */
#define DECL_EXTERN_C_P(NODE) \
(DECL_LANGUAGE (NODE) == lang_c)
/* Nonzero if NODE is an `extern "C"' function. */
#define DECL_EXTERN_C_FUNCTION_P(NODE) \
(DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE))
/* True iff DECL is an entity with vague linkage whose definition is
available in this translation unit. */
#define DECL_REPO_AVAILABLE_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.repo_available_p)
/* True if DECL is declared 'constexpr'. */
#define DECL_DECLARED_CONSTEXPR_P(DECL) \
DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL)))
// True if NODE was declared as 'concept'. The flag implies that the
// declaration is constexpr, that the declaration cannot be specialized or
// refined, and that the result type must be convertible to bool.
#define DECL_DECLARED_CONCEPT_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.concept_p)
/* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a
template function. */
#define DECL_PRETTY_FUNCTION_P(NODE) \
(DECL_NAME (NODE) \
&& !strcmp (IDENTIFIER_POINTER (DECL_NAME (NODE)), "__PRETTY_FUNCTION__"))
/* Nonzero if the variable was declared to be thread-local.
We need a special C++ version of this test because the middle-end
DECL_THREAD_LOCAL_P uses the symtab, so we can't use it for
templates. */
#define CP_DECL_THREAD_LOCAL_P(NODE) \
(TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)))
/* The _TYPE context in which this _DECL appears. This field holds the
class where a virtual function instance is actually defined. */
#define DECL_CLASS_CONTEXT(NODE) \
(DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE)
/* For a non-member friend function, the class (if any) in which this
friend was defined. For example, given:
struct S { friend void f (); };
the DECL_FRIEND_CONTEXT for `f' will be `S'.
NOTE: this reuses the lang_decl_fn `context' slot also used by
DECL_THUNKS and DECL_INHERITED_CTOR. */
#define DECL_FRIEND_CONTEXT(NODE) \
((DECL_DECLARES_FUNCTION_P (NODE) \
&& DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \
? LANG_DECL_FN_CHECK (NODE)->context \
: NULL_TREE)
/* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */
#define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \
(LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT))
/* The context of NODE, normalized so that a file-scope entity reports
the global namespace rather than a null context. */
#define CP_DECL_CONTEXT(NODE) \
(!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace)
#define CP_TYPE_CONTEXT(NODE) \
(!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace)
/* Inverse normalization: map the global namespace back to the plain
DECL_CONTEXT representation. */
#define FROB_CONTEXT(NODE) \
((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE))
/* 1 iff NODE has namespace scope, including the global namespace. */
#define DECL_NAMESPACE_SCOPE_P(NODE) \
(!DECL_TEMPLATE_PARM_P (NODE) \
&& TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL)
#define TYPE_NAMESPACE_SCOPE_P(NODE) \
(TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL)
#define NAMESPACE_SCOPE_P(NODE) \
((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \
|| (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE)))
/* 1 iff NODE is a class member. */
#define DECL_CLASS_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE)))
#define TYPE_CLASS_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE)))
/* 1 iff NODE is function-local. */
#define DECL_FUNCTION_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) \
&& TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL)
#define TYPE_FUNCTION_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL)
/* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for
both the primary typeinfo object and the associated NTBS name. */
#define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))
/* 1 iff VAR_DECL node NODE is virtual table or VTT. */
#define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */
#define FUNCTION_REF_QUALIFIED(NODE) \
TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. */
#define FUNCTION_RVALUE_QUALIFIED(NODE) \
TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE))
/* Returns 1 iff VAR_DECL is a construction virtual table.
DECL_VTABLE_OR_VTT_P will be true in this case and must be checked
before using this macro. */
#define DECL_CONSTRUCTION_VTABLE_P(NODE) \
TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE))
/* 1 iff NODE is function-local, but for types. */
#define LOCAL_CLASS_P(NODE) \
(decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE)
/* For a NAMESPACE_DECL: the list of using namespace directives
The PURPOSE is the used namespace, the value is the namespace
that is the common ancestor. */
#define DECL_NAMESPACE_USING(NODE) (LANG_DECL_NS_CHECK (NODE)->ns_using)
/* In a NAMESPACE_DECL, the DECL_INITIAL is used to record all users
of a namespace, to record the transitive closure of using namespace. */
#define DECL_NAMESPACE_USERS(NODE) (LANG_DECL_NS_CHECK (NODE)->ns_users)
/* In a NAMESPACE_DECL, the list of namespaces which have associated
themselves with this one. */
#define DECL_NAMESPACE_ASSOCIATIONS(NODE) \
DECL_INITIAL (NAMESPACE_DECL_CHECK (NODE))
/* In a NAMESPACE_DECL, points to the original namespace if this is
a namespace alias. */
#define DECL_NAMESPACE_ALIAS(NODE) \
DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE))
#define ORIGINAL_NAMESPACE(NODE) \
(DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE))
/* Nonzero if NODE is the std namespace. */
#define DECL_NAMESPACE_STD_P(NODE) \
(TREE_CODE (NODE) == NAMESPACE_DECL \
&& CP_DECL_CONTEXT (NODE) == global_namespace \
&& DECL_NAME (NODE) == std_identifier)
/* In a TREE_LIST concatenating using directives, indicate indirect
directives */
#define TREE_INDIRECT_USING(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* In a TREE_LIST in an attribute list, indicates that the attribute
must be applied at instantiation time. */
#define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag
was inherited from a template parameter, not explicitly indicated. */
#define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* Lookup/insert helpers for the shadowed-variable hash table used by
   DECL_SHADOWED_FOR_VAR below. */
extern tree decl_shadowed_for_var_lookup (tree);
extern void decl_shadowed_for_var_insert (tree, tree);
/* Nonzero if this is a using decl for a dependent scope. */
#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))
/* The scope named in a using decl. */
#define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE))
/* The decls named by a using decl. */
#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))
/* Nonzero if the using decl refers to a dependent type. */
#define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE))
/* In a VAR_DECL, true if we have a shadowed local variable
in the shadowed var table for this VAR_DECL. */
#define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \
(VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p)
/* In a VAR_DECL for a variable declared in a for statement,
this is the shadowed (local) variable.  The table lookup is only done
when the flag above says an entry exists. */
#define DECL_SHADOWED_FOR_VAR(NODE) \
(DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL)
#define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \
(decl_shadowed_for_var_insert (NODE, VAL))
/* In a FUNCTION_DECL, this is nonzero if this function was defined in
the class definition. We have saved away the text of the function,
but have not yet processed it. */
#define DECL_PENDING_INLINE_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pending_inline_p)
/* If DECL_PENDING_INLINE_P holds, this is the saved text of the
function. */
#define DECL_PENDING_INLINE_INFO(NODE) \
(LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info)
/* Nonzero for TYPE_DECL means that it was written 'using name = type'. */
#define TYPE_DECL_ALIAS_P(NODE) \
DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE))
/* Nonzero for TEMPLATE_DECL means that it is a 'complex' alias template. */
#define TEMPLATE_DECL_COMPLEX_ALIAS_P(NODE) \
DECL_LANG_FLAG_2 (TEMPLATE_DECL_CHECK (NODE))
/* Nonzero for a type which is an alias for another type; i.e, a type
whose declaration was written 'using name-of-type =
another-type'.  Safe on any tree: checks TYPE_P and the presence of a
TYPE_DECL name before testing the alias flag. */
#define TYPE_ALIAS_P(NODE) \
(TYPE_P (NODE) \
&& TYPE_NAME (NODE) \
&& TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \
&& TYPE_DECL_ALIAS_P (TYPE_NAME (NODE)))
/* For a class type: if this structure has many fields, we'll sort them
and put them into a TREE_VEC. */
#define CLASSTYPE_SORTED_FIELDS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->sorted_fields)
/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or
TEMPLATE_DECL, the entity is either a template specialization (if
DECL_USE_TEMPLATE is nonzero) or the abstract instance of the
template itself.
In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose
TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a
specialization or abstract instance. The TREE_VALUE is the
template arguments used to specialize the template.
Consider:
template <typename T> struct S { friend void f(T) {} };
In this case, S<int>::f is, from the point of view of the compiler,
an instantiation of a template -- but, from the point of view of
the language, each instantiation of S results in a wholly unrelated
global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f
will be non-NULL, but DECL_USE_TEMPLATE will be zero. */
#define DECL_TEMPLATE_INFO(NODE) \
(DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK (NODE)) \
->u.min.template_info)
/* For a VAR_DECL, indicates that the variable is actually a
non-static data member of anonymous union that has been promoted to
variable status. */
#define DECL_ANON_UNION_VAR_P(NODE) \
(DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)))
/* Template information for a RECORD_TYPE or UNION_TYPE. */
#define CLASSTYPE_TEMPLATE_INFO(NODE) \
(LANG_TYPE_CLASS_CHECK (RECORD_OR_UNION_CHECK (NODE))->template_info)
/* Template information for an ENUMERAL_TYPE. Although an enumeration may
not be a primary template, it may be declared within the scope of a
primary template and the enumeration constants may depend on
non-type template parameters. */
#define ENUM_TEMPLATE_INFO(NODE) \
(TYPE_LANG_SLOT_1 (ENUMERAL_TYPE_CHECK (NODE)))
/* Template information for a template template parameter. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \
(LANG_TYPE_CLASS_CHECK (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)) \
->template_info)
/* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or
BOUND_TEMPLATE_TEMPLATE_PARM type. This ignores any alias
templateness of NODE.  Dispatches on TREE_CODE to the three
type-specific accessors above; NULL_TREE for anything else. */
#define TYPE_TEMPLATE_INFO(NODE) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
? ENUM_TEMPLATE_INFO (NODE) \
: (TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM \
? TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (NODE) \
: (CLASS_TYPE_P (NODE) \
? CLASSTYPE_TEMPLATE_INFO (NODE) \
: NULL_TREE)))
/* Template information (if any) for an alias type. */
#define TYPE_ALIAS_TEMPLATE_INFO(NODE) \
(DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \
? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \
: NULL_TREE)
/* If NODE is a type alias, this accessor returns the template info
for the alias template (if any). Otherwise behave as
TYPE_TEMPLATE_INFO. */
#define TYPE_TEMPLATE_INFO_MAYBE_ALIAS(NODE) \
(TYPE_ALIAS_P (NODE) \
? TYPE_ALIAS_TEMPLATE_INFO (NODE) \
: TYPE_TEMPLATE_INFO (NODE))
/* Set the template information for an ENUMERAL_, RECORD_, or
UNION_TYPE to VAL. */
#define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
? (ENUM_TEMPLATE_INFO (NODE) = (VAL)) \
: ((CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \
? (CLASSTYPE_TEMPLATE_INFO (NODE) = (VAL)) \
: (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL))))
/* Accessors for a TEMPLATE_INFO node: the TEMPLATE_DECL and its
argument vector. */
#define TI_TEMPLATE(NODE) TREE_TYPE (TEMPLATE_INFO_CHECK (NODE))
#define TI_ARGS(NODE) TREE_CHAIN (TEMPLATE_INFO_CHECK (NODE))
#define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE)
/* For a given TREE_VEC containing a template argument list,
this property contains the number of arguments that are not
defaulted. */
#define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) TREE_CHAIN (TREE_VEC_CHECK (NODE))
/* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT
property. */
#define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE)
#if CHECKING_P
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE))
#else
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \
? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \
: TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE))
#endif
/* The list of typedefs - used in the template - that need
access checking at template instantiation time.
FIXME this should be associated with the TEMPLATE_DECL, not the
TEMPLATE_INFO. */
#define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \
((struct tree_template_info*)TEMPLATE_INFO_CHECK \
(NODE))->typedefs_needing_access_checking
/* We use TREE_VECs to hold template arguments. If there is only one
level of template arguments, then the TREE_VEC contains the
arguments directly. If there is more than one level of template
arguments, then each entry in the TREE_VEC is itself a TREE_VEC,
containing the template arguments for a single level. The first
entry in the outer TREE_VEC is the outermost level of template
parameters; the last is the innermost.
It is incorrect to ever form a template argument vector containing
only one level of arguments, but which is a TREE_VEC containing as
its only entry the TREE_VEC for that level.
For each TREE_VEC containing the template arguments for a single
level, it's possible to get or set the number of non defaulted
template arguments by using the accessor macros
GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or
SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT. */
/* Nonzero if the template arguments is actually a vector of vectors,
rather than just a vector.
NOTE: NODE is expanded without surrounding parentheses in the first
operand below, so pass a simple expression, not e.g. a comma
expression. */
#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \
(NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \
&& TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC)
/* The depth of a template argument vector. When called directly by
the parser, we use a TREE_LIST rather than a TREE_VEC to represent
template arguments. In fact, we may even see NULL_TREE if there
are no template arguments. In both of those cases, there is only
one level of template arguments. */
#define TMPL_ARGS_DEPTH(NODE) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1)
/* The LEVELth level of the template ARGS. The outermost level of
args is level 1, not level 0. */
#define TMPL_ARGS_LEVEL(ARGS, LEVEL) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \
? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS))
/* Set the LEVELth level of the template ARGS to VAL. This macro does
not work with single-level argument vectors. */
#define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \
(TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL))
/* Accesses the IDXth parameter in the LEVELth level of the ARGS. */
#define TMPL_ARG(ARGS, LEVEL, IDX) \
(TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX))
/* Given a single level of template arguments in NODE, return the
number of arguments. */
#define NUM_TMPL_ARGS(NODE) \
(TREE_VEC_LENGTH (NODE))
/* Returns the innermost level of template arguments in ARGS. */
#define INNERMOST_TEMPLATE_ARGS(NODE) \
(get_innermost_template_args ((NODE), 1))
/* The number of levels of template parameters given by NODE. */
#define TMPL_PARMS_DEPTH(NODE) \
((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE)))
/* The TEMPLATE_DECL instantiated or specialized by NODE. This
TEMPLATE_DECL will be the immediate parent, not the most general
template. For example, in:
template <class T> struct S { template <class U> void f(U); }
the FUNCTION_DECL for S<int>::f<double> will have, as its
DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'.
As a special case, for a member friend template of a template
class, this value will not be a TEMPLATE_DECL, but rather an
IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
any explicit template arguments provided. For example, in:
template <class T> struct S { friend void f<int>(int, double); }
the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
DECL_TI_ARGS will be {int}.
For a FIELD_DECL with a non-static data member initializer, this value
is the FIELD_DECL it was instantiated from. */
#define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))
/* The template arguments used to obtain this decl from the most
general form of DECL_TI_TEMPLATE. For the example given for
DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}. These
are always the full set of arguments required to instantiate this
declaration from the most general template specialized here. */
#define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE))
/* The TEMPLATE_DECL associated with NODE, a class type. Even if NODE
will be generated from a partial specialization, the TEMPLATE_DECL
referred to here will be the original template. For example,
given:
template <typename T> struct S {};
template <typename T> struct S<T*> {};
the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not the S<T*>. */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
#define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))
/* For a template instantiation TYPE, returns the TYPE corresponding
to the primary template. Otherwise returns TYPE itself. */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \
((CLASSTYPE_USE_TEMPLATE ((TYPE)) \
&& !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \
? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \
(CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
: (TYPE))
/* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs. */
#define TYPE_TI_TEMPLATE(NODE) \
(TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))
/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */
#define TYPE_TI_ARGS(NODE) \
(TI_ARGS (TYPE_TEMPLATE_INFO (NODE)))
#define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE)
/* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the
sense of [temp.mem]. */
#define DECL_MEMBER_TEMPLATE_P(NODE) \
(DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE)))
/* Nonzero if the NODE corresponds to the template parameters for a
member template, whose inline definition is being processed after
the class definition is complete. */
#define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE)
/* Determine if a declaration (PARM_DECL or FIELD_DECL) is a pack. */
#define DECL_PACK_P(NODE) \
(DECL_P (NODE) && PACK_EXPANSION_P (TREE_TYPE (NODE)))
/* Determines if NODE is an expansion of one or more parameter packs,
e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_P(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
|| TREE_CODE (NODE) == EXPR_PACK_EXPANSION)
/* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or
EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_PATTERN(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or
EXPR_PACK_EXPANSION.
NOTE: expands to a bare if/else (no do-while wrapper), so use it only
as a complete statement. */
#define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \
if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \
TREE_TYPE (NODE) = VALUE; \
else \
TREE_OPERAND (NODE, 0) = VALUE
/* The list of parameter packs used in the PACK_EXPANSION_* node. The
TREE_VALUE of each TREE_LIST contains the parameter packs.
Yields an lvalue (note the leading `*&'), so it can be assigned to. */
#define PACK_EXPANSION_PARAMETER_PACKS(NODE) \
*(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \
? &TREE_OPERAND (NODE, 1) \
: &TYPE_MINVAL (TYPE_PACK_EXPANSION_CHECK (NODE)))
/* Any additional template args to be applied when substituting into
the pattern, set by tsubst_pack_expansion for partial instantiations. */
#define PACK_EXPANSION_EXTRA_ARGS(NODE) \
*(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
? &TYPE_MAXVAL (NODE) \
: &TREE_OPERAND ((NODE), 2))
/* True iff this pack expansion is within a function context. */
#define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE)
/* True iff this pack expansion is for sizeof.... */
#define PACK_EXPANSION_SIZEOF_P(NODE) TREE_LANG_FLAG_1 (NODE)
/* True iff the wildcard can match a template parameter pack. */
#define WILDCARD_PACK_P(NODE) TREE_LANG_FLAG_0 (NODE)
/* Determine if this is an argument pack. */
#define ARGUMENT_PACK_P(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \
|| TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK)
/* The arguments stored in an argument pack. Arguments are stored in a
TREE_VEC, which may have length zero. */
#define ARGUMENT_PACK_ARGS(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Set the arguments stored in an argument pack. VALUE must be a
TREE_VEC.
NOTE: like SET_PACK_EXPANSION_PATTERN, this is a bare if/else;
use it only as a complete statement. */
#define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \
if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \
TREE_TYPE (NODE) = VALUE; \
else \
TREE_OPERAND (NODE, 0) = VALUE
/* Whether the argument pack is "incomplete", meaning that more
arguments can still be deduced. Incomplete argument packs are only
used when the user has provided an explicit template argument list
for a variadic function template. Some of the explicit template
arguments will be placed into the beginning of the argument pack,
but additional arguments might still be deduced. */
#define ARGUMENT_PACK_INCOMPLETE_P(NODE) \
TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE))
/* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template
arguments used to fill this pack. */
#define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \
TREE_TYPE (ARGUMENT_PACK_ARGS (NODE))
/* In an ARGUMENT_PACK_SELECT, the argument pack from which an
argument will be selected. */
#define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack)
/* In an ARGUMENT_PACK_SELECT, the index of the argument we want to
select. */
#define ARGUMENT_PACK_SELECT_INDEX(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index)
/* In an ARGUMENT_PACK_SELECT, the actual underlying argument that the
ARGUMENT_PACK_SELECT represents. */
#define ARGUMENT_PACK_SELECT_ARG(NODE) \
TREE_VEC_ELT (ARGUMENT_PACK_ARGS (ARGUMENT_PACK_SELECT_FROM_PACK (NODE)), \
ARGUMENT_PACK_SELECT_INDEX (NODE))
#define FOLD_EXPR_CHECK(NODE) \
TREE_CHECK4 (NODE, UNARY_LEFT_FOLD_EXPR, UNARY_RIGHT_FOLD_EXPR, \
BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)
#define BINARY_FOLD_EXPR_CHECK(NODE) \
TREE_CHECK2 (NODE, BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)
/* True if NODE is UNARY_FOLD_EXPR or a BINARY_FOLD_EXPR */
#define FOLD_EXPR_P(NODE) \
(TREE_CODE (NODE) == UNARY_LEFT_FOLD_EXPR \
|| TREE_CODE (NODE) == UNARY_RIGHT_FOLD_EXPR \
|| TREE_CODE (NODE) == BINARY_LEFT_FOLD_EXPR \
|| TREE_CODE (NODE) == BINARY_RIGHT_FOLD_EXPR)
/* True when NODE is a fold over a compound assignment operator. */
#define FOLD_EXPR_MODIFY_P(NODE) \
TREE_LANG_FLAG_0 (FOLD_EXPR_CHECK (NODE))
/* An INTEGER_CST containing the tree code of the folded operator. */
#define FOLD_EXPR_OP(NODE) \
TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 0)
/* The expression containing an unexpanded parameter pack. */
#define FOLD_EXPR_PACK(NODE) \
TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 1)
/* In a binary fold expression, the argument with no unexpanded
parameter packs. */
#define FOLD_EXPR_INIT(NODE) \
TREE_OPERAND (BINARY_FOLD_EXPR_CHECK (NODE), 2)
/* In a FUNCTION_DECL, the saved language-specific per-function data. */
#define DECL_SAVED_FUNCTION_DATA(NODE) \
(LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \
->u.saved_language_function)
/* True if NODE is an implicit INDIRECT_EXPR from convert_from_reference. */
#define REFERENCE_REF_P(NODE) \
(INDIRECT_REF_P (NODE) \
&& TREE_TYPE (TREE_OPERAND (NODE, 0)) \
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND ((NODE), 0))) \
== REFERENCE_TYPE))
/* True if NODE is a REFERENCE_TYPE which is OK to instantiate to be a
reference to VLA type, because it's used for VLA capture. */
#define REFERENCE_VLA_OK(NODE) \
(TYPE_LANG_FLAG_5 (REFERENCE_TYPE_CHECK (NODE)))
/* True if the new/delete expression uses the global operator,
written `::new' / `::delete'. */
#define NEW_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_VEC(NODE) \
TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE))
#define CALL_OR_AGGR_INIT_CHECK(NODE) \
TREE_CHECK2 ((NODE), CALL_EXPR, AGGR_INIT_EXPR)
/* Indicates that this is a non-dependent COMPOUND_EXPR which will
resolve to a function call. */
#define COMPOUND_EXPR_OVERLOADED(NODE) \
TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE))
/* In a CALL_EXPR appearing in a template, true if Koenig lookup
should be performed at instantiation time. */
#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE))
/* True if the arguments to NODE should be evaluated in left-to-right
order regardless of PUSH_ARGS_REVERSED. */
#define CALL_EXPR_ORDERED_ARGS(NODE) \
TREE_LANG_FLAG_3 (CALL_OR_AGGR_INIT_CHECK (NODE))
/* True if the arguments to NODE should be evaluated in right-to-left
order regardless of PUSH_ARGS_REVERSED. */
#define CALL_EXPR_REVERSE_ARGS(NODE) \
TREE_LANG_FLAG_5 (CALL_OR_AGGR_INIT_CHECK (NODE))
/* True if CALL_EXPR was written as an operator expression, not a function
call. */
#define CALL_EXPR_OPERATOR_SYNTAX(NODE) \
TREE_LANG_FLAG_6 (CALL_OR_AGGR_INIT_CHECK (NODE))
/* Indicates whether a string literal has been parenthesized. Such
usages are disallowed in certain circumstances. */
#define PAREN_STRING_LITERAL_P(NODE) \
TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE))
/* Indicates whether a COMPONENT_REF or a SCOPE_REF has been parenthesized, or
an INDIRECT_REF comes from parenthesizing a _DECL. Currently only set some
of the time in C++14 mode. */
#define REF_PARENTHESIZED_P(NODE) \
TREE_LANG_FLAG_2 (TREE_CHECK3 ((NODE), COMPONENT_REF, INDIRECT_REF, SCOPE_REF))
/* Nonzero if this AGGR_INIT_EXPR provides for initialization via a
constructor call, rather than an ordinary function call. */
#define AGGR_INIT_VIA_CTOR_P(NODE) \
TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE))
/* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize
the object. */
#define AGGR_INIT_ZERO_FIRST(NODE) \
TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE))
/* Nonzero means that the call is the jump from a thunk to the
thunked-to function. */
#define AGGR_INIT_FROM_THUNK_P(NODE) \
(AGGR_INIT_EXPR_CHECK (NODE)->base.protected_flag)
/* AGGR_INIT_EXPR accessors. These are equivalent to the CALL_EXPR
accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of
CALL_EXPR_STATIC_CHAIN).  Operands 1 and 2 are the function and the
slot; arguments start at operand 3, hence the `+ 3' / `- 3' below. */
#define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1)
#define AGGR_INIT_EXPR_SLOT(NODE) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2)
#define AGGR_INIT_EXPR_ARG(NODE, I) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3)
#define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3)
/* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE.
We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if
the argument count is zero when checking is enabled. Instead, do
the pointer arithmetic to advance past the 3 fixed operands in a
AGGR_INIT_EXPR. That produces a valid pointer to just past the end of
the operand array, even if it's not valid to dereference it. */
#define AGGR_INIT_EXPR_ARGP(NODE) \
(&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3)
/* Abstract iterators for AGGR_INIT_EXPRs. */
/* Structure containing iterator state.  Initialize with
   init_aggr_init_expr_arg_iterator; advance with next_aggr_init_expr_arg. */
struct aggr_init_expr_arg_iterator {
tree t; /* the AGGR_INIT_EXPR being iterated over */
int n; /* total argument count (aggr_init_expr_nargs) */
int i; /* index of the next argument to return */
};
/* Prepare ITER to walk the argument list of AGGR_INIT_EXPR node EXP,
   positioned just before the first argument.  */
inline void
init_aggr_init_expr_arg_iterator (tree exp,
                                  aggr_init_expr_arg_iterator *iter)
{
  iter->i = 0;
  iter->n = aggr_init_expr_nargs (exp);
  iter->t = exp;
}
/* Fetch the argument that ITER currently designates and step ITER
   forward.  Once the argument list is exhausted, yield NULL_TREE.  */
inline tree
next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter)
{
  if (iter->i >= iter->n)
    return NULL_TREE;
  tree arg = AGGR_INIT_EXPR_ARG (iter->t, iter->i);
  ++iter->i;
  return arg;
}
/* Initialize the abstract argument list iterator object ITER, then advance
past and return the first argument (NULL_TREE if EXP has no arguments).
Useful in for expressions, e.g.
for (arg = first_aggr_init_expr_arg (exp, &iter); arg;
arg = next_aggr_init_expr_arg (&iter)) */
inline tree
first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter)
{
init_aggr_init_expr_arg_iterator (exp, iter);
return next_aggr_init_expr_arg (iter);
}
/* Report whether ITER still has arguments left to visit; ITER itself
   is not modified.  */
inline bool
more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
{
  return iter->n > iter->i;
}
/* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable
ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state.
The loop ends when the iterator yields NULL_TREE. */
#define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \
for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \
(arg) = next_aggr_init_expr_arg (&(iter)))
/* VEC_INIT_EXPR accessors. */
#define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0)
#define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1)
/* Indicates that a VEC_INIT_EXPR is a potential constant expression.
Only set when the current function is constexpr. */
#define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \
TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE))
/* Indicates that a VEC_INIT_EXPR is expressing value-initialization. */
#define VEC_INIT_EXPR_VALUE_INIT(NODE) \
TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE))
/* The condition under which this MUST_NOT_THROW_EXPR actually blocks
exceptions. NULL_TREE means 'true'. */
#define MUST_NOT_THROW_COND(NODE) \
TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1)
/* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a
TEMPLATE_DECL. This macro determines whether or not a given class
type is really a template type, as opposed to an instantiation or
specialization of one. */
#define CLASSTYPE_IS_TEMPLATE(NODE) \
(CLASSTYPE_TEMPLATE_INFO (NODE) \
&& !CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
/* The name used by the user to name the typename type. Typically,
this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the
corresponding TYPE_DECL. However, this may also be a
TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */
#define TYPENAME_TYPE_FULLNAME(NODE) \
(TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as an "enum". */
#define TYPENAME_IS_ENUM_P(NODE) \
(TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as a "class", "struct", or
"union". */
#define TYPENAME_IS_CLASS_P(NODE) \
(TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE is in the process of being resolved. */
#define TYPENAME_IS_RESOLVING_P(NODE) \
(TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE)))
/* [class.virtual]
A class that declares or inherits a virtual function is called a
polymorphic class. */
#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE))
/* Nonzero if this class has a virtual function table pointer:
either it is polymorphic or it has virtual bases. */
#define TYPE_CONTAINS_VPTR_P(NODE) \
(TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE))
/* This flag is true of a local VAR_DECL if it was declared in a for
statement, but we are no longer in the scope of the for. */
#define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE))
/* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL
if we already emitted a warning about using it. */
#define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))
/* Nonzero if NODE is a FUNCTION_DECL (for a function with global
scope) declared in a local scope. */
#define DECL_LOCAL_FUNCTION_P(NODE) \
DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is the target for genericization of 'break' stmts. */
#define LABEL_DECL_BREAK(NODE) \
DECL_LANG_FLAG_0 (LABEL_DECL_CHECK (NODE))
/* Nonzero if NODE is the target for genericization of 'continue' stmts. */
#define LABEL_DECL_CONTINUE(NODE) \
DECL_LANG_FLAG_1 (LABEL_DECL_CHECK (NODE))
/* True if NODE was declared with auto in its return type, but it has
started compilation and so the return type might have been changed by
return type deduction; its declared return type should be found in
DECL_STRUCT_FUNCTION(NODE)->language->x_auto_return_pattern. */
#define FNDECL_USED_AUTO(NODE) \
TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is a DECL which we know about but which has not
been explicitly declared, such as a built-in function or a friend
declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P
will be set. */
#define DECL_ANTICIPATED(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.anticipated_p)
/* True for artificial decls added for OpenMP privatized non-static
data members.  NOTE: reuses the same anticipated_p bit as
DECL_ANTICIPATED above, but on a VAR_DECL. */
#define DECL_OMP_PRIVATIZED_MEMBER(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.anticipated_p)
/* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend
within a class but has not been declared in the surrounding scope.
The function is invisible except via argument dependent lookup. */
#define DECL_HIDDEN_FRIEND_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p)
/* Nonzero if NODE is an artificial FUNCTION_DECL for
#pragma omp declare reduction. */
#define DECL_OMP_DECLARE_REDUCTION_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->omp_declare_reduction_p)
/* Nonzero if DECL has been declared threadprivate by
#pragma omp threadprivate. */
#define CP_DECL_THREADPRIVATE_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p)
/* Nonzero if NODE is a VAR_DECL which has been declared inline.
Guarded: variables without lang-specific data simply report false. */
#define DECL_VAR_DECLARED_INLINE_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.base.var_declared_inline_p \
: false)
#define SET_DECL_VAR_DECLARED_INLINE_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.var_declared_inline_p \
= true)
/* Nonzero if NODE is an artificial VAR_DECL for a C++17 decomposition
declaration. */
#define DECL_DECOMPOSITION_P(NODE) \
(VAR_P (NODE) && DECL_LANG_SPECIFIC (NODE) \
? DECL_LANG_SPECIFIC (NODE)->u.base.decomposition_p \
: false)
#define SET_DECL_DECOMPOSITION_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.decomposition_p \
= true)
/* Nonzero if NODE is an inline VAR_DECL. In C++17, static data members
declared with constexpr specifier are implicitly inline variables. */
#define DECL_INLINE_VAR_P(NODE) \
(DECL_VAR_DECLARED_INLINE_P (NODE) \
|| (cxx_dialect >= cxx1z \
&& DECL_DECLARED_CONSTEXPR_P (NODE) \
&& DECL_CLASS_SCOPE_P (NODE)))
/* Nonzero if DECL was declared with '= delete'. */
#define DECL_DELETED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p)
/* Nonzero if DECL was declared with '= default' (maybe implicitly). */
#define DECL_DEFAULTED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->defaulted_p)
/* Nonzero if DECL is explicitly defaulted in the class body. */
#define DECL_DEFAULTED_IN_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL))
/* Nonzero if DECL was defaulted outside the class body. */
#define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) \
&& !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL)))
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* Returns nonzero if DECL has external linkage, as specified by the
language standard. (This predicate may hold even when the
corresponding entity is not actually given external linkage in the
object file; see decl_linkage for details.) */
#define DECL_EXTERNAL_LINKAGE_P(DECL) \
(decl_linkage (DECL) == lk_external)
/* Keep these codes in ascending code order. */
#define INTEGRAL_CODE_P(CODE) \
((CODE) == ENUMERAL_TYPE \
|| (CODE) == BOOLEAN_TYPE \
|| (CODE) == INTEGER_TYPE)
/* [basic.fundamental]
Types bool, char, wchar_t, and the signed and unsigned integer types
are collectively called integral types.
Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration
types as well, which is incorrect in C++. Keep these checks in
ascending code order. */
#define CP_INTEGRAL_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == BOOLEAN_TYPE \
|| TREE_CODE (TYPE) == INTEGER_TYPE)
/* Returns true if TYPE is an integral or enumeration name. Keep
these checks in ascending code order. */
#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE))
/* Returns true if TYPE is an integral or unscoped enumeration type. */
#define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \
(UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE))
/* True if the class type TYPE is a literal type. */
#define CLASSTYPE_LITERAL_P(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->is_literal)
/* [basic.fundamental]
Integral and floating types are collectively called arithmetic
types.
As a GNU extension, we also accept complex types.
Keep these checks in ascending code order. */
#define ARITHMETIC_TYPE_P(TYPE) \
(CP_INTEGRAL_TYPE_P (TYPE) \
|| TREE_CODE (TYPE) == REAL_TYPE \
|| TREE_CODE (TYPE) == COMPLEX_TYPE)
/* True iff TYPE is cv decltype(nullptr). */
#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE)
/* [basic.types]
Arithmetic types, enumeration types, pointer types,
pointer-to-member types, and std::nullptr_t are collectively called
scalar types.
Keep these checks in ascending code order. */
#define SCALAR_TYPE_P(TYPE) \
(TYPE_PTRDATAMEM_P (TYPE) \
|| TREE_CODE (TYPE) == ENUMERAL_TYPE \
|| ARITHMETIC_TYPE_P (TYPE) \
|| TYPE_PTR_P (TYPE) \
|| TYPE_PTRMEMFUNC_P (TYPE) \
|| NULLPTR_TYPE_P (TYPE))
/* Determines whether this type is a C++0x scoped enumeration
type. Scoped enumerations types are introduced via "enum class" or
"enum struct", e.g.,
enum class Color {
Red, Green, Blue
};
Scoped enumeration types are different from normal (unscoped)
enumeration types in several ways:
- The enumerators of a scoped enumeration type are only available
within the scope of the enumeration type and not in the
enclosing scope. For example, the Red color can be referred to
with "Color::Red" but not "Red".
- Scoped enumerators and enumerations do not implicitly convert
to integers or 'bool'.
- The underlying type of the enum is well-defined. */
#define SCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE))
/* Determine whether this is an unscoped enumeration type. */
#define UNSCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE))
/* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped
enumeration type (1) or a normal (unscoped) enumeration type
(0). */
#define SET_SCOPED_ENUM_P(TYPE, VAL) \
(ENUM_IS_SCOPED (TYPE) = (VAL))
/* Set the flag indicating whether an ENUMERAL_TYPE came from an
opaque-enum-declaration, i.e. one that declares the enumeration
without providing its enumerator list (VAL is 1) or from a full
enum definition (VAL is 0). */
#define SET_OPAQUE_ENUM_P(TYPE, VAL) \
(ENUM_IS_OPAQUE (TYPE) = (VAL))
/* Determines whether TYPE is an opaque enumeration type, i.e. one
introduced by an opaque-enum-declaration and not (yet) defined. */
#define OPAQUE_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE))
/* Determines whether an ENUMERAL_TYPE has an explicit
underlying type. */
#define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE))
/* Returns the underlying type of the given enumeration type. The
underlying type is determined in different ways, depending on the
properties of the enum:
- In C++0x, the underlying type can be explicitly specified, e.g.,
enum E1 : char { ... } // underlying type is char
- In a C++0x scoped enumeration, the underlying type is int
unless otherwise specified:
enum class E2 { ... } // underlying type is int
- Otherwise, the underlying type is determined based on the
values of the enumerators. In this case, the
ENUM_UNDERLYING_TYPE will not be set until after the definition
of the enumeration is completed by finish_enum. */
#define ENUM_UNDERLYING_TYPE(TYPE) \
TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE))
/* [dcl.init.aggr]
An aggregate is an array or a class with no user-provided
constructors, no brace-or-equal-initializers for non-static data
members, no private or protected non-static data members, no
base classes, and no virtual functions.
As an extension, we also treat vectors as aggregates. Keep these
checks in ascending code order. */
#define CP_AGGREGATE_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == VECTOR_TYPE \
|| TREE_CODE (TYPE) == ARRAY_TYPE \
|| (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE)))
/* Nonzero for a class type means that the class type has a
user-declared constructor. */
#define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE))
/* Nonzero means that the FUNCTION_TYPE or METHOD_TYPE has a
late-specified return type. */
#define TYPE_HAS_LATE_RETURN_TYPE(NODE) \
(TYPE_LANG_FLAG_2 (FUNC_OR_METHOD_CHECK (NODE)))
/* When appearing in an INDIRECT_REF, it means that the tree structure
underneath is actually a call to a constructor. This is needed
when the constructor must initialize local storage (which can
be automatically destroyed), rather than allowing it to allocate
space from the heap.
When appearing in a SAVE_EXPR, it means that underneath
is a call to a constructor.
When appearing in a CONSTRUCTOR, the expression is a
compound literal.
When appearing in a FIELD_DECL, it means that this field
has been duly initialized in its constructor. */
#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE))
/* True if NODE is a brace-enclosed initializer. */
#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node)
/* True if NODE is a compound-literal, i.e., a brace-enclosed
initializer cast to a particular type. */
#define COMPOUND_LITERAL_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))
/* True if NODE is a CONSTRUCTOR whose element vector is empty and
which is not a compound literal (TREE_HAS_CONSTRUCTOR unset), i.e.
an empty brace-enclosed initializer "{}". */
#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
&& vec_safe_is_empty(CONSTRUCTOR_ELTS(NODE))\
&& !TREE_HAS_CONSTRUCTOR (NODE))
/* True if NODE is a init-list used as a direct-initializer, i.e.
B b{1,2}, not B b({1,2}) or B b = {1,2}. */
#define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE)))
/* True if an uninitialized element in NODE should not be treated as
implicitly value-initialized. Only used in constexpr evaluation. */
#define CONSTRUCTOR_NO_IMPLICIT_ZERO(NODE) \
(TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (NODE)))
/* True if this CONSTRUCTOR should not be used as a variable initializer
because it was loaded from a constexpr variable with mutable fields. */
#define CONSTRUCTOR_MUTABLE_POISON(NODE) \
(TREE_LANG_FLAG_2 (CONSTRUCTOR_CHECK (NODE)))
/* True if NODE is a brace-enclosed initializer list that is used as a
direct-initializer, i.e. "B b{1,2}" rather than "B b = {1,2}"; this
is BRACE_ENCLOSED_INITIALIZER_P combined with
CONSTRUCTOR_IS_DIRECT_INIT. */
#define DIRECT_LIST_INIT_P(NODE) \
(BRACE_ENCLOSED_INITIALIZER_P (NODE) && CONSTRUCTOR_IS_DIRECT_INIT (NODE))
/* True if NODE represents a conversion for direct-initialization in a
template. Set by perform_implicit_conversion_flags. */
#define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \
(TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
/* Nonzero means that an object of this type can not be initialized using
an initializer list. */
#define CLASSTYPE_NON_AGGREGATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate)
#define TYPE_NON_AGGREGATE_CLASS(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE))
/* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign)
/* Nonzero if there is a non-trivial X::X(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor)
/* Nonzero if there is a non-trivial X::op=(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign)
/* Nonzero if there is a non-trivial X::X(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor)
/* Nonzero if there is no trivial default constructor for this class. */
#define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt)
/* Nonzero if TYPE has a trivial destructor. From [class.dtor]:
A destructor is trivial if it is an implicitly declared
destructor and if:
- all of the direct base classes of its class have trivial
destructors,
- for all of the non-static data members of its class that are
of class type (or array thereof), each such class has a
trivial destructor. */
#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
(!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))
/* Nonzero for _TYPE node means that this type does not have a trivial
destructor. Therefore, destroying an object of this type will
involve a call to a destructor. This can apply to objects of
ARRAY_TYPE if the type of the elements needs a destructor. */
#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \
(TYPE_LANG_FLAG_4 (NODE))
/* Nonzero for class type means that the default constructor is trivial. */
#define TYPE_HAS_TRIVIAL_DFLT(NODE) \
(TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE))
/* Nonzero for class type means that copy initialization of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \
(TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE))
/* Nonzero for class type means that assignment of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \
(TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE))
/* Returns true if NODE is a pointer-to-data-member. */
#define TYPE_PTRDATAMEM_P(NODE) \
(TREE_CODE (NODE) == OFFSET_TYPE)
/* Returns true if NODE is a pointer. */
#define TYPE_PTR_P(NODE) \
(TREE_CODE (NODE) == POINTER_TYPE)
/* Returns true if NODE is an object type:
[basic.types]
An object type is a (possibly cv-qualified) type that is not a
function type, not a reference type, and not a void type.
Keep these checks in ascending order, for speed. */
#define TYPE_OBJ_P(NODE) \
(TREE_CODE (NODE) != REFERENCE_TYPE \
&& !VOID_TYPE_P (NODE) \
&& TREE_CODE (NODE) != FUNCTION_TYPE \
&& TREE_CODE (NODE) != METHOD_TYPE)
/* Returns true if NODE is a pointer to an object. Keep these checks
in ascending tree code order. */
#define TYPE_PTROB_P(NODE) \
(TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a reference to an object. Keep these checks
in ascending tree code order. */
#define TYPE_REF_OBJ_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a pointer to an object, or a pointer to
void. Keep these checks in ascending tree code order. */
#define TYPE_PTROBV_P(NODE) \
(TYPE_PTR_P (NODE) \
&& !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \
|| TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE))
/* Returns true if NODE is a pointer to function type. */
#define TYPE_PTRFN_P(NODE) \
(TYPE_PTR_P (NODE) \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a reference to function type. */
#define TYPE_REFFN_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a pointer to member function type. */
#define TYPE_PTRMEMFUNC_P(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_PTRMEMFUNC_FLAG (NODE))
#define TYPE_PTRMEMFUNC_FLAG(NODE) \
(TYPE_LANG_FLAG_2 (RECORD_TYPE_CHECK (NODE)))
/* Returns true if NODE is a pointer-to-member. */
#define TYPE_PTRMEM_P(NODE) \
(TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE))
/* Returns true if NODE is a pointer or a pointer-to-member. */
#define TYPE_PTR_OR_PTRMEM_P(NODE) \
(TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE))
/* Indicates when overload resolution may resolve to a pointer to
member function. [expr.unary.op]/3 */
#define PTRMEM_OK_P(NODE) \
TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF))
/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true,
before using this macro. */
#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \
(cp_build_qualified_type (TREE_TYPE (TYPE_FIELDS (NODE)),\
cp_type_quals (NODE)))
/* As above, but can be used in places that want an lvalue at the expense
of not necessarily having the correct cv-qualifiers. */
#define TYPE_PTRMEMFUNC_FN_TYPE_RAW(NODE) \
(TREE_TYPE (TYPE_FIELDS (NODE)))
/* Returns `A' for a type like `int (A::*)(double)' */
#define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \
TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* These are used to manipulate the canonical RECORD_TYPE from the
hashed POINTER_TYPE, and can only be used on the POINTER_TYPE. */
#define TYPE_GET_PTRMEMFUNC_TYPE(NODE) \
(TYPE_LANG_SPECIFIC (NODE) ? LANG_TYPE_PTRMEM_CHECK (NODE)->record : NULL)
#define TYPE_SET_PTRMEMFUNC_TYPE(NODE, VALUE) \
do { \
if (TYPE_LANG_SPECIFIC (NODE) == NULL) \
{ \
TYPE_LANG_SPECIFIC (NODE) \
= (struct lang_type *) ggc_internal_cleared_alloc \
(sizeof (struct lang_type_ptrmem)); \
TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.h.is_lang_type_class = 0; \
} \
TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.record = (VALUE); \
} while (0)
/* For a pointer-to-member type of the form `T X::*', this is `X'.
For a type like `void (X::*)() const', this type is `X', not `const
X'. To get at the `const X' you have to look at the
TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have
type `const X*'. */
#define TYPE_PTRMEM_CLASS_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TYPE_OFFSET_BASETYPE (NODE) \
: TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE))
/* For a pointer-to-member type of the form `T X::*', this is `T'. */
#define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TREE_TYPE (NODE) \
: TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for
`X'. */
#define PTRMEM_CST_CLASS(NODE) \
TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE)))
/* For a pointer-to-member constant `X::Y' this is the _DECL for
`Y'. */
#define PTRMEM_CST_MEMBER(NODE) (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member)
/* The expression in question for a TYPEOF_TYPE. */
#define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE)))
/* The type in question for an UNDERLYING_TYPE. */
#define UNDERLYING_TYPE_TYPE(NODE) \
(TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE)))
/* The type in question for BASES. */
#define BASES_TYPE(NODE) \
(TYPE_VALUES_RAW (BASES_CHECK (NODE)))
#define BASES_DIRECT(NODE) \
TREE_LANG_FLAG_0 (BASES_CHECK (NODE))
/* The expression in question for a DECLTYPE_TYPE. */
#define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE)))
/* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an
id-expression or a member-access expression. When false, it was
parsed as a full expression. */
#define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \
(DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag
/* These flags indicate that we want different semantics from normal
decltype: lambda capture just drops references, init capture
uses auto semantics, lambda proxies look through implicit dereference. */
#define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \
TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_INIT_CAPTURE(NODE) \
TREE_LANG_FLAG_1 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \
TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_REF_CAPTURE(NODE) \
TREE_LANG_FLAG_3 (DECLTYPE_TYPE_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_EXTERN(NODE) \
DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_STATIC(NODE) \
DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a lambda capture
field for an array of runtime bound. */
#define DECL_VLA_CAPTURE_P(NODE) \
DECL_LANG_FLAG_1 (FIELD_DECL_CHECK (NODE))
/* Nonzero for PARM_DECL node means that this is an array function
parameter, i.e, a[] rather than *a. */
#define DECL_ARRAY_PARAMETER_P(NODE) \
DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE))
/* Nonzero for a FIELD_DECL whose NSDMI (non-static data member
initializer) is currently being instantiated. */
#define DECL_INSTANTIATING_NSDMI_P(NODE) \
DECL_LANG_FLAG_2 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a base class
of the parent object, as opposed to a member field. */
#define DECL_FIELD_IS_BASE(NODE) \
DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a simple (no
explicit initializer) lambda capture field, making it invisible to
name lookup in unevaluated contexts. */
#define DECL_NORMAL_CAPTURE_P(NODE) \
DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE))
/* Nonzero if TYPE is an anonymous union or struct type. We have to use a
flag for this because "A union for which objects or pointers are
declared is not an anonymous union" [class.union]. */
#define ANON_AGGR_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr)
#define SET_ANON_AGGR_TYPE_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1)
/* Nonzero if TYPE is an anonymous union type. */
#define ANON_UNION_TYPE_P(NODE) \
(TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE))
/* Define fields and accessors for nodes representing declared names. */
/* Nonzero if TYPE is an unnamed class with a typedef for linkage purposes. */
#define TYPE_WAS_UNNAMED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous)
/* C++: all of these are overloaded! These apply only to TYPE_DECLs. */
/* The format of each node in the DECL_FRIENDLIST is as follows:
The TREE_PURPOSE will be the name of a function, i.e., an
IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose
TREE_VALUEs are friends with the given name. */
#define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE))
#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST))
#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST))
/* The DECL_ACCESS, if non-NULL, is a TREE_LIST. The TREE_PURPOSE of
each node is a type; the TREE_VALUE is the access granted for this
DECL in that type. The DECL_ACCESS is set by access declarations.
For example, if a member that would normally be public in a
derived class is made protected, then the derived class and the
protected_access_node will appear in the DECL_ACCESS for the node. */
#define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access)
/* Nonzero if the FUNCTION_DECL is a global constructor. */
#define DECL_GLOBAL_CTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_ctor_p)
/* Nonzero if the FUNCTION_DECL is a global destructor. */
#define DECL_GLOBAL_DTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_dtor_p)
/* Accessor macros for C++ template decl nodes. */
/* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node
is a INT_CST whose TREE_INT_CST_LOW indicates the level of the
template parameters, with 1 being the outermost set of template
parameters. The TREE_VALUE is a vector, whose elements are the
template parameters at each level. Each element in the vector is a
TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a
non-type parameter), or a TYPE_DECL (if the parameter is a type
parameter). The TREE_PURPOSE is the default value, if any. The
TEMPLATE_PARM_INDEX for the parameter is available as the
DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a
TYPE_DECL).
FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
tree is converted to C++ class hierarchy. */
#define DECL_TEMPLATE_PARMS(NODE) \
((struct tree_template_decl *)CONST_CAST_TREE (TEMPLATE_DECL_CHECK (NODE)))->arguments
#define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \
INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE))
#define DECL_NTPARMS(NODE) \
TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE))
/* For function, method, class-data templates.
FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
tree is converted to C++ class hierarchy. */
#define DECL_TEMPLATE_RESULT(NODE) \
((struct tree_template_decl *)CONST_CAST_TREE(TEMPLATE_DECL_CHECK (NODE)))->result
/* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS
lists all instantiations and specializations of the function so that
tsubst_friend_function can reassign them to another template if we find
that the namespace-scope template is really a partial instantiation of a
friend template.
For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds
all instantiations and specializations of the class type, including
partial instantiations and partial specializations, so that if we
explicitly specialize a partial instantiation we can walk the list
in maybe_process_partial_specialization and reassign them or complain
as appropriate.
In both cases, the TREE_PURPOSE of each node contains the arguments
used; the TREE_VALUE contains the generated variable. The template
arguments are always complete. For example, given:
template <class T> struct S1 {
template <class U> struct S2 {};
template <class U> struct S2<U*> {};
};
the record for the partial specialization will contain, as its
argument list, { {T}, {U*} }, and will be on the
DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template
<class U> struct S1<T>::S2'.
This list is not used for other templates. */
#define DECL_TEMPLATE_INSTANTIATIONS(NODE) \
DECL_SIZE_UNIT (TEMPLATE_DECL_CHECK (NODE))
/* For a class template, this list contains the partial
specializations of this template. (Full specializations are not
recorded on this list.) The TREE_PURPOSE holds the arguments used
in the partial specialization (e.g., for `template <class T> struct
S<T*, int>' this will be `T*, int'.) The arguments will also include
any outer template arguments. The TREE_VALUE holds the TEMPLATE_DECL
for the partial specialization. The TREE_TYPE is the _TYPE node for
the partial specialization.
This list is not used for other templates. */
#define DECL_TEMPLATE_SPECIALIZATIONS(NODE) \
DECL_SIZE (TEMPLATE_DECL_CHECK (NODE))
/* Nonzero for a DECL which is actually a template parameter. Keep
these checks in ascending tree code order. */
#define DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) \
&& (TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == PARM_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL))
/* Mark NODE as a template parameter. */
#define SET_DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) = 1)
/* Nonzero if NODE is a template template parameter. */
#define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE))
/* Nonzero for a DECL that represents a function template. */
#define DECL_FUNCTION_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL)
/* Nonzero for a DECL that represents a class template or alias
template. */
#define DECL_TYPE_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL)
/* Nonzero for a DECL that represents a class template. */
#define DECL_CLASS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a TEMPLATE_DECL that represents an alias template. */
#define DECL_ALIAS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a NODE which declares a type. */
#define DECL_DECLARES_TYPE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE))
/* Nonzero if NODE declares a function. */
#define DECL_DECLARES_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE))
/* Nonzero if NODE is the typedef implicitly generated for a type when
the type is declared. In C++, `struct S {};' is roughly
equivalent to `struct S {}; typedef struct S S;' in C.
DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this
example. In C++, there is a second implicit typedef for each
class, called the injected-class-name, in the scope of `S' itself, so that
you can say `S::S'. DECL_SELF_REFERENCE_P will hold for that typedef. */
#define DECL_IMPLICIT_TYPEDEF_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE))
#define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \
(DECL_LANG_FLAG_2 (NODE) = 1)
#define DECL_SELF_REFERENCE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE))
#define SET_DECL_SELF_REFERENCE_P(NODE) \
(DECL_LANG_FLAG_4 (NODE) = 1)
/* A `primary' template is one that has its own template header and is not
a partial specialization. A member function of a class template is a
template, but not primary. A member template is primary. Friend
templates are primary, too. */
/* Returns the primary template corresponding to these parameters. */
#define DECL_PRIMARY_TEMPLATE(NODE) \
(TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE)))
/* Returns nonzero if NODE is a primary template. */
#define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE))
/* Nonzero iff NODE is a specialization of a template. The value
indicates the type of specializations:
1=implicit instantiation
2=partial or explicit specialization, e.g.:
template <> int min<int> (int, int),
3=explicit instantiation, e.g.:
template int min<int> (int, int);
Note that NODE will be marked as a specialization even if the
template it is instantiating is not a primary template. For
example, given:
template <typename T> struct O {
void f();
struct I {};
};
both O<int>::f and O<int>::I will be marked as instantiations.
If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also
be non-NULL. */
#define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template)
/* Like DECL_USE_TEMPLATE, but for class types. */
#define CLASSTYPE_USE_TEMPLATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->use_template)
/* True if NODE is a specialization of a primary template. */
#define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \
(CLASS_TYPE_P (NODE) \
&& CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
/* The DECL_USE_TEMPLATE/CLASSTYPE_USE_TEMPLATE encoding (see the
comment above DECL_USE_TEMPLATE) is: 1 = implicit instantiation,
2 = partial or explicit specialization, 3 = explicit instantiation.
Nonzero if NODE is an instantiation of any kind; "& 1" is true for
both 1 (implicit) and 3 (explicit) but not 2 (specialization). */
#define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1)
/* Like DECL_TEMPLATE_INSTANTIATION, but for class types. */
#define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) & 1)
/* Nonzero if NODE is a partial or explicit specialization (2). */
#define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2)
#define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2)
/* Returns true for an explicit or partial specialization of a class
template. */
#define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 2)
#define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 2)
/* Nonzero if NODE is an implicit instantiation (1), e.g. one created
by using the template rather than by an explicit request. */
#define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1)
#define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1)
/* Like DECL_IMPLICIT_INSTANTIATION, but for class types. */
#define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 1)
#define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 1)
/* Nonzero if NODE is an explicit instantiation (3), e.g.
"template int min<int> (int, int);". */
#define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3)
#define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3)
/* Like DECL_EXPLICIT_INSTANTIATION, but for class types. */
#define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 3)
#define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 3)
/* Nonzero if DECL is a friend function which is an instantiation
from the point of view of the compiler, but not from the point of
view of the language. For example given:
template <class T> struct S { friend void f(T) {}; };
the declaration of `void f(int)' generated when S<int> is
instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be
a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION.
(Detected as: has template info, but DECL_USE_TEMPLATE is zero.)  */
#define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \
(DECL_LANG_SPECIFIC (DECL) && DECL_TEMPLATE_INFO (DECL) \
&& !DECL_USE_TEMPLATE (DECL))
/* Nonzero if DECL is a function generated from a function 'temploid',
i.e. template, member of class template, or dependent friend. */
#define DECL_TEMPLOID_INSTANTIATION(DECL) \
(DECL_TEMPLATE_INSTANTIATION (DECL) \
|| DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL))
/* Nonzero if DECL is either defined implicitly by the compiler or
generated from a temploid. */
#define DECL_GENERATED_P(DECL) \
(DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL))
/* Nonzero iff we are currently processing a declaration for an
entity with its own template parameter list, and which is not a
full specialization. */
#define PROCESSING_REAL_TEMPLATE_DECL_P() \
(processing_template_decl > template_class_depth (current_scope ()))
/* Nonzero if this VAR_DECL or FUNCTION_DECL has already been
instantiated, i.e. its definition has been generated from the
pattern given in the template. */
#define DECL_TEMPLATE_INSTANTIATED(NODE) \
DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE))
/* We know what we're doing with this decl now.
   (NOTE(review): presumably set once the decl's linkage/emission
   interface has been decided; confirm against decl2.c users.)  */
#define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE)
/* DECL_EXTERNAL must be set on a decl until the decl is actually emitted,
so that assemble_external will work properly. So we have this flag to
tell us whether the decl is really not external.
This flag does not indicate whether or not the decl is defined in the
current translation unit; it indicates whether or not we should emit the
decl at the end of compilation if it is defined and needed. */
#define DECL_NOT_REALLY_EXTERN(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern)
/* True iff NODE is external from this translation unit's point of
   view: DECL_EXTERNAL is set and it is not merely "not really"
   external per the flag above.  */
#define DECL_REALLY_EXTERN(NODE) \
(DECL_EXTERNAL (NODE) \
&& (!DECL_LANG_SPECIFIC (NODE) || !DECL_NOT_REALLY_EXTERN (NODE)))
/* A thunk is a stub function.
A thunk is an alternate entry point for an ordinary FUNCTION_DECL.
The address of the ordinary FUNCTION_DECL is given by the
DECL_INITIAL, which is always an ADDR_EXPR whose operand is a
FUNCTION_DECL. The job of the thunk is to either adjust the this
pointer before transferring control to the FUNCTION_DECL, or call
FUNCTION_DECL and then adjust the result value. Note, the result
pointer adjusting thunk must perform a call to the thunked
function, (or be implemented via passing some invisible parameter
to the thunked function, which is modified to perform the
adjustment just before returning).
A thunk may perform either, or both, of the following operations:
o Adjust the this or result pointer by a constant offset.
o Adjust the this or result pointer by looking up a vcall or vbase offset
in the vtable.
A this pointer adjusting thunk converts from a base to a derived
class, and hence adds the offsets. A result pointer adjusting thunk
converts from a derived class to a base, and hence subtracts the
offsets. If both operations are performed, then the constant
adjustment is performed first for this pointer adjustment and last
for the result pointer adjustment.
The constant adjustment is given by THUNK_FIXED_OFFSET. If the
vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is
used. For this pointer adjusting thunks, it is the vcall offset
into the vtable. For result pointer adjusting thunks it is the
binfo of the virtual base to convert to. Use that binfo's vbase
offset.
It is possible to have equivalent covariant thunks. These are
distinct virtual covariant thunks whose vbase offsets happen to
have the same value. THUNK_ALIAS is used to pick one as the
canonical thunk, which will get all the this pointer adjusting
thunks attached to it. */
/* An integer indicating how many bytes should be subtracted from the
this or result pointer when this function is called. */
#define THUNK_FIXED_OFFSET(DECL) \
(DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset)
/* A tree indicating how to perform the virtual adjustment. For a this
adjusting thunk it is the number of bytes to be added to the vtable
to find the vcall offset. For a result adjusting thunk, it is the
binfo of the relevant virtual base. If NULL, then there is no
virtual adjust. (The vptr is always located at offset zero from
the this or result pointer.) (If the covariant type is within the
class hierarchy being laid out, the vbase index is not yet known
at the point we need to create the thunks, hence the need to use
binfos.) */
#define THUNK_VIRTUAL_OFFSET(DECL) \
(LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access)
/* A thunk which is equivalent to another thunk.
   (Stored in the template_info slot, which is presumably otherwise
   unused for thunks -- confirm against lang_decl layout.)  */
#define THUNK_ALIAS(DECL) \
(DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info)
/* For thunk NODE, this is the FUNCTION_DECL thunked to. It is
possible for the target to be a thunk too. */
#define THUNK_TARGET(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* True for a SCOPE_REF iff the "template" keyword was used to
indicate that the qualified name denotes a template. */
#define QUALIFIED_NAME_IS_TEMPLATE(NODE) \
(TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE)))
/* True for an OMP_ATOMIC that has dependent parameters. These are stored
as an expr in operand 1, and integer_zero_node in operand 0. */
#define OMP_ATOMIC_DEPENDENT_P(NODE) \
(TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST)
/* Used while gimplifying continue statements bound to OMP_FOR nodes. */
#define OMP_FOR_GIMPLIFYING_P(NODE) \
(TREE_LANG_FLAG_0 (OMP_LOOP_CHECK (NODE)))
/* A language-specific token attached to the OpenMP data clauses to
hold code (or code fragments) related to ctors, dtors, and op=.
See semantics.c for details.
(Stashed in the TREE_TYPE field of the clause, valid only for
clause codes in the PRIVATE..LINEAR range.)  */
#define CP_OMP_CLAUSE_INFO(NODE) \
TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
OMP_CLAUSE_LINEAR))
/* Nonzero if this transaction expression's body contains statements. */
#define TRANSACTION_EXPR_IS_STMT(NODE) \
TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE))
/* These macros provide convenient access to the various _STMT nodes
created when parsing template declarations. */
#define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0)
#define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1)
#define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0)
#define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1)
#define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0)
/* Nonzero if this try block is a function try block. */
#define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE))
/* HANDLER accessors: the exception-declaration, the handler body, and
   the caught type.  */
#define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0)
#define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1)
#define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE))
/* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run
and the VAR_DECL for which this cleanup exists. */
#define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0)
#define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1)
#define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2)
/* IF_STMT accessors. These give access to the condition of the if
statement, the then block of the if statement, and the else block
of the if statement if it exists. */
#define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0)
#define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1)
#define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2)
#define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3)
/* Nonzero for a `constexpr if' statement (C++17).  */
#define IF_STMT_CONSTEXPR_P(NODE) TREE_LANG_FLAG_0 (IF_STMT_CHECK (NODE))
/* WHILE_STMT accessors. These give access to the condition of the
while statement and the body of the while statement, respectively. */
#define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0)
#define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1)
/* DO_STMT accessors. These give access to the condition of the do
statement and the body of the do statement, respectively. */
#define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0)
#define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1)
/* FOR_STMT accessors. These give access to the init statement,
condition, update expression, and body of the for statement,
respectively. */
#define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0)
#define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1)
#define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2)
#define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3)
#define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4)
/* RANGE_FOR_STMT accessors. These give access to the declarator,
expression, body, and scope of the statement, respectively. */
#define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0)
#define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1)
#define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2)
#define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3)
/* True if the range-for carries an "ivdep" (ignore vector
   dependencies) annotation -- presumably from #pragma GCC ivdep;
   confirm in parser.c.  */
#define RANGE_FOR_IVDEP(NODE) TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE))
/* SWITCH_STMT accessors: the condition, body, original condition
   type, and scope of the switch statement, respectively.  */
#define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0)
#define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1)
#define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2)
#define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3)
/* STMT_EXPR accessor. */
#define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0)
/* EXPR_STMT accessor. This gives the expression associated with an
expression statement. */
#define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0)
/* True if this TARGET_EXPR was created by build_cplus_new, and so we can
discard it if it isn't useful. */
#define TARGET_EXPR_IMPLICIT_P(NODE) \
TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR is the result of list-initialization of a
temporary. */
#define TARGET_EXPR_LIST_INIT_P(NODE) \
TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR expresses direct-initialization of an object
to be named later. */
#define TARGET_EXPR_DIRECT_INIT_P(NODE) \
TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE))
/* True if NODE is a TARGET_EXPR that just expresses a copy of its INITIAL; if
the initializer has void type, it's doing something more complicated. */
#define SIMPLE_TARGET_EXPR_P(NODE) \
(TREE_CODE (NODE) == TARGET_EXPR \
&& !VOID_TYPE_P (TREE_TYPE (TARGET_EXPR_INITIAL (NODE))))
/* True if EXPR expresses direct-initialization of a TYPE. */
#define DIRECT_INIT_EXPR_P(TYPE,EXPR) \
(TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \
&& same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR)))
/* True if this CONVERT_EXPR is for a conversion to virtual base in
an NSDMI, and should be re-evaluated when used in a constructor. */
#define CONVERT_EXPR_VBASE_PATH(NODE) \
TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE))
/* True if SIZEOF_EXPR argument is type. */
#define SIZEOF_EXPR_TYPE_P(NODE) \
TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE))
/* An enumeration of the kind of tags that C++ accepts. */
enum tag_types {
none_type = 0, /* Not a tag type. */
record_type, /* "struct" types. */
class_type, /* "class" types. */
union_type, /* "union" types. */
enum_type, /* "enum" types. */
typename_type, /* "typename" types. */
scope_type /* namespace or tagged type name followed by :: */
};
/* The various kinds of lvalues we distinguish.  These are bit flags
   and may be combined.  */
enum cp_lvalue_kind_flags {
clk_none = 0, /* Things that are not an lvalue. */
clk_ordinary = 1, /* An ordinary lvalue. */
clk_rvalueref = 2,/* An xvalue (rvalue formed using an rvalue reference) */
clk_class = 4, /* A prvalue of class or array type. */
clk_bitfield = 8, /* An lvalue for a bit-field. */
clk_packed = 16 /* An lvalue for a packed field. */
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum cp_lvalue_kind_flags. */
typedef int cp_lvalue_kind;
/* Various kinds of template specialization, instantiation, etc. */
enum tmpl_spec_kind {
tsk_none, /* Not a template at all. */
tsk_invalid_member_spec, /* An explicit member template
specialization, but the enclosing
classes have not all been explicitly
specialized. */
tsk_invalid_expl_inst, /* An explicit instantiation containing
template parameter lists. */
tsk_excessive_parms, /* A template declaration with too many
template parameter lists. */
tsk_insufficient_parms, /* A template declaration with too few
parameter lists. */
tsk_template, /* A template declaration. */
tsk_expl_spec, /* An explicit specialization. */
tsk_expl_inst /* An explicit instantiation. */
};
/* The various kinds of access. BINFO_ACCESS depends on these being
two bit quantities. The numerical values are important; they are
used to initialize RTTI data structures, so changing them changes
the ABI. */
enum access_kind {
ak_none = 0, /* Inaccessible. */
ak_public = 1, /* Accessible, as a `public' thing. */
ak_protected = 2, /* Accessible, as a `protected' thing. */
ak_private = 3 /* Accessible, as a `private' thing. */
};
/* The various kinds of special functions. If you add to this list,
you should update special_function_p as well. */
enum special_function_kind {
sfk_none = 0, /* Not a special function. This enumeral
must have value zero; see
special_function_p. */
sfk_constructor, /* A constructor. */
sfk_copy_constructor, /* A copy constructor. */
sfk_move_constructor, /* A move constructor. */
sfk_copy_assignment, /* A copy assignment operator. */
sfk_move_assignment, /* A move assignment operator. */
sfk_destructor, /* A destructor. */
sfk_complete_destructor, /* A destructor for complete objects. */
sfk_base_destructor, /* A destructor for base subobjects. */
sfk_deleting_destructor, /* A destructor for complete objects that
deletes the object after it has been
destroyed. */
sfk_conversion, /* A conversion operator. */
sfk_deduction_guide, /* A class template deduction guide. */
sfk_inheriting_constructor /* An inheriting constructor */
};
/* The various kinds of linkage. From [basic.link],
A name is said to have linkage when it might denote the same
object, reference, function, type, template, namespace or value
as a name introduced in another scope:
-- When a name has external linkage, the entity it denotes can
be referred to from scopes of other translation units or from
other scopes of the same translation unit.
-- When a name has internal linkage, the entity it denotes can
be referred to by names from other scopes in the same
translation unit.
-- When a name has no linkage, the entity it denotes cannot be
referred to by names from other scopes. */
enum linkage_kind {
lk_none, /* No linkage. */
lk_internal, /* Internal linkage. */
lk_external /* External linkage. */
};
/* The kinds of storage duration an entity may have; cf. [basic.stc]. */
enum duration_kind {
dk_static, /* Static storage duration. */
dk_thread, /* Thread storage duration. */
dk_auto, /* Automatic storage duration. */
dk_dynamic /* Dynamic storage duration. */
};
/* Bitmask flags to control type substitution. */
enum tsubst_flags {
tf_none = 0, /* nothing special */
tf_error = 1 << 0, /* give error messages */
tf_warning = 1 << 1, /* give warnings too */
tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */
tf_keep_type_decl = 1 << 3, /* retain typedef type decls
(make_typename_type use) */
tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal
instantiate_type use) */
tf_user = 1 << 5, /* found template must be a user template
(lookup_template_class use) */
tf_conv = 1 << 6, /* We are determining what kind of
conversion might be permissible,
not actually performing the
conversion. */
tf_decltype = 1 << 7, /* We are the operand of decltype.
Used to implement the special rules
for calls in decltype (5.2.2/11). */
tf_partial = 1 << 8, /* Doing initial explicit argument
substitution in fn_type_unification. */
tf_fndecl_type = 1 << 9, /* Substituting the type of a function
declaration. */
tf_no_cleanup = 1 << 10, /* Do not build a cleanup
(build_target_expr and friends) */
/* Convenient substitution flags combinations. */
tf_warning_or_error = tf_warning | tf_error
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum tsubst_flags. */
typedef int tsubst_flags_t;
/* The kind of checking we can do looking in a class hierarchy. */
enum base_access_flags {
ba_any = 0, /* Do not check access, allow an ambiguous base,
prefer a non-virtual base */
ba_unique = 1 << 0, /* Must be a unique base. */
ba_check_bit = 1 << 1, /* Check access. */
ba_check = ba_unique | ba_check_bit,
ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope. */
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum base_access_flags. */
typedef int base_access;
/* The various kinds of access check during parsing. */
enum deferring_kind {
dk_no_deferred = 0, /* Check access immediately */
dk_deferred = 1, /* Deferred check */
dk_no_check = 2 /* No access check */
};
/* The kind of base we can find, looking in a class hierarchy.
Values <0 indicate we failed. */
enum base_kind {
bk_inaccessible = -3, /* The base is inaccessible */
bk_ambig = -2, /* The base is ambiguous */
bk_not_base = -1, /* It is not a base */
bk_same_type = 0, /* It is the same type */
bk_proper_base = 1, /* It is a proper base */
bk_via_virtual = 2 /* It is a proper base, but via a virtual
path. This might not be the canonical
binfo. */
};
/* Node for "pointer to (virtual) function".
This may be distinct from ptr_type_node so gdb can distinguish them. */
#define vfunc_ptr_type_node vtable_entry_type
/* For building calls to `delete'. */
extern GTY(()) tree integer_two_node;
/* The number of function bodies which we are currently processing.
(Zero if we are at namespace scope, one inside the body of a
function, two inside the body of a function in a local class, etc.) */
extern int function_depth;
/* Nonzero if we are inside eq_specializations, which affects comparison of
PARM_DECLs in cp_tree_equal. */
extern int comparing_specializations;
/* In parser.c. */
/* Nonzero if we are parsing an unevaluated operand: an operand to
sizeof, typeof, or alignof. This is a count since operands to
sizeof can be nested. */
extern int cp_unevaluated_operand;
/* RAII class used to inhibit the evaluation of operands during parsing
and template instantiation. Evaluation warnings are also inhibited.
The constructor enters the unevaluated context; the destructor
restores the previous state.  */
struct cp_unevaluated
{
cp_unevaluated ();
~cp_unevaluated ();
};
/* in pt.c */
/* These values are used for the `STRICT' parameter to type_unification and
fn_type_unification. Their meanings are described with the
documentation for fn_type_unification. */
enum unification_kind_t {
DEDUCE_CALL, /* Deduction for arguments of a function call.  */
DEDUCE_CONV, /* Deduction for a conversion function.  */
DEDUCE_EXACT /* Deduction requiring an exact match.  */
};
// An RAII class used to create a new pointer map for local
// specializations. When the stack goes out of scope, the
// previous pointer map is restored.
struct local_specialization_stack
{
local_specialization_stack ();
~local_specialization_stack ();
// The map that was in effect before this stack was created;
// restored by the destructor.
hash_map<tree, tree> *saved;
};
/* in class.c */
extern int current_class_depth;
/* An array of all local classes present in this translation unit, in
declaration order. */
extern GTY(()) vec<tree, va_gc> *local_classes;
/* Here's where we control how name mangling takes place. */
/* Cannot use '$' up front, because this confuses gdb
(names beginning with '$' are gdb-local identifiers).
Note that all forms in which the '$' is significant are long enough
for direct indexing (meaning that if we know there is a '$'
at a particular location, we can index into the string at
any other location that provides distinguishing characters). */
/* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler
doesn't allow '.' in symbol names. */
/* Three cases: prefer '.' as the joiner character, fall back to '$',
   and finally to '_' when the assembler allows neither.  In the
   last case the generated names are no longer unambiguous, so the
   *_NAME_P predicates must compare full prefixes (defined below)
   rather than testing for the joiner.  */
#ifndef NO_DOT_IN_LABEL
#define JOINER '.'
#define AUTO_TEMP_NAME "_.tmp_"
#define VFIELD_BASE ".vf"
#define VFIELD_NAME "_vptr."
#define VFIELD_NAME_FORMAT "_vptr.%s"
#else /* NO_DOT_IN_LABEL */
#ifndef NO_DOLLAR_IN_LABEL
#define JOINER '$'
#define AUTO_TEMP_NAME "_$tmp_"
#define VFIELD_BASE "$vf"
#define VFIELD_NAME "_vptr$"
#define VFIELD_NAME_FORMAT "_vptr$%s"
#else /* NO_DOLLAR_IN_LABEL */
#define AUTO_TEMP_NAME "__tmp_"
#define TEMP_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
sizeof (AUTO_TEMP_NAME) - 1))
#define VTABLE_NAME "__vt_"
#define VTABLE_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
sizeof (VTABLE_NAME) - 1))
#define VFIELD_BASE "__vfb"
#define VFIELD_NAME "__vptr_"
#define VFIELD_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
sizeof (VFIELD_NAME) - 1))
#define VFIELD_NAME_FORMAT "__vptr_%s"
#endif /* NO_DOLLAR_IN_LABEL */
#endif /* NO_DOT_IN_LABEL */
#define THIS_NAME "this"
#define IN_CHARGE_NAME "__in_chrg"
#define VTBL_PTR_TYPE "__vtbl_ptr_type"
#define VTABLE_DELTA_NAME "__delta"
#define VTABLE_PFN_NAME "__pfn"
#define LAMBDANAME_PREFIX "__lambda"
#define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d"
/* User-defined literal operator names, e.g. `operator"" _suffix',
   and their mangled ("li<suffix>") counterparts.  */
#define UDLIT_OP_ANSI_PREFIX "operator\"\""
#define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s"
#define UDLIT_OP_MANGLED_PREFIX "li"
#define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s"
#define UDLIT_OPER_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), \
UDLIT_OP_ANSI_PREFIX, \
sizeof (UDLIT_OP_ANSI_PREFIX) - 1))
#define UDLIT_OP_SUFFIX(ID_NODE) \
(IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1)
/* When a joiner character is available, the predicates can test for
   the joiner at a fixed position instead of comparing prefixes.  */
#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)
#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
&& IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
&& IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)
#define TEMP_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
#define VFIELD_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))
#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */
/* Nonzero if we're done parsing and into end-of-file activities.
Two if we're done with front-end processing. */
extern int at_eof;
/* True if note_mangling_alias should enqueue mangling aliases for
later generation, rather than emitting them right away. */
extern bool defer_mangling_aliases;
/* True if noexcept is part of the type (i.e. in C++17). */
extern bool flag_noexcept_type;
/* A list of namespace-scope objects which have constructors or
destructors which reside in the global scope. The decl is stored
in the TREE_VALUE slot and the initializer is stored in the
TREE_PURPOSE slot. */
extern GTY(()) tree static_aggregates;
/* Likewise, for thread local storage. */
extern GTY(()) tree tls_aggregates;
/* Flags marking "special" declarations during overload handling:
   DTOR_FLAG presumably marks a destructor name and TYPENAME_FLAG a
   type name; NO_SPECIAL is the ordinary case.  Confirm against the
   users of this enum.  */
enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG };
/* These are used as bits in flags passed to various functions to
control their behavior. Despite the LOOKUP_ prefix, many of these
do not control name lookup. ??? Functions using these flags should
probably be modified to accept explicit boolean flags for the
behaviors relevant to them. */
/* Check for access violations. */
#define LOOKUP_PROTECT (1 << 0)
#define LOOKUP_NORMAL (LOOKUP_PROTECT)
/* Even if the function found by lookup is a virtual function, it
should be called directly. */
#define LOOKUP_NONVIRTUAL (1 << 1)
/* Non-converting (i.e., "explicit") constructors are not tried. This flag
indicates that we are not performing direct-initialization. */
#define LOOKUP_ONLYCONVERTING (1 << 2)
#define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING)
/* If a temporary is created, it should be created so that it lives
as long as the current variable bindings; otherwise it only lives
until the end of the complete-expression. It also forces
direct-initialization in cases where other parts of the compiler
have already generated a temporary, such as reference
initialization and the catch parameter. */
#define DIRECT_BIND (1 << 3)
/* We're performing a user-defined conversion, so more user-defined
conversions are not permitted (only built-in conversions). */
#define LOOKUP_NO_CONVERSION (1 << 4)
/* The user has explicitly called a destructor. (Therefore, we do
not need to check that the object is non-NULL before calling the
destructor.) */
#define LOOKUP_DESTRUCTOR (1 << 5)
/* Do not permit references to bind to temporaries. */
#define LOOKUP_NO_TEMP_BIND (1 << 6)
/* Do not accept objects, and possibly namespaces. */
#define LOOKUP_PREFER_TYPES (1 << 7)
/* Do not accept objects, and possibly types. */
#define LOOKUP_PREFER_NAMESPACES (1 << 8)
/* Accept types or namespaces. */
#define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES)
/* Return friend declarations and un-declared builtin functions.
(Normally, these entities are registered in the symbol table, but
not found by lookup.)
NOTE: the remaining flags are chained, each defined as the previous
one shifted left by one, so inserting a flag mid-chain renumbers
everything after it.  */
#define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1)
/* Prefer that the lvalue be treated as an rvalue. */
#define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1)
/* We're inside an init-list, so narrowing conversions are ill-formed. */
#define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1)
/* We're looking up a constructor for list-initialization. */
#define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1)
/* This is the first parameter of a copy constructor. */
#define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1)
/* We only want to consider list constructors. */
#define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1)
/* Return after determining which function to call and checking access.
Used by synthesized_method_walk to determine which functions will
be called to initialize subobjects, in order to determine exception
specification and possible implicit delete.
This is kind of a hack, but exiting early avoids problems with trying
to perform argument conversions when the class isn't complete yet. */
#define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1)
/* Used by calls from defaulted functions to limit the overload set to avoid
cycles trying to declare them (core issue 1092). */
#define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1)
/* Used in calls to store_init_value to suppress its usual call to
digest_init. */
#define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1)
/* An instantiation with explicit template arguments. */
#define LOOKUP_EXPLICIT_TMPL_ARGS (LOOKUP_ALREADY_DIGESTED << 1)
/* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues. */
#define LOOKUP_NO_RVAL_BIND (LOOKUP_EXPLICIT_TMPL_ARGS << 1)
/* Used by case_conversion to disregard non-integral conversions. */
#define LOOKUP_NO_NON_INTEGRAL (LOOKUP_NO_RVAL_BIND << 1)
/* Used for delegating constructors in order to diagnose self-delegation. */
#define LOOKUP_DELEGATING_CONS (LOOKUP_NO_NON_INTEGRAL << 1)
/* Convenience predicates over the PREFER_TYPES/PREFER_NAMESPACES bits. */
#define LOOKUP_NAMESPACES_ONLY(F) \
(((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_TYPES_ONLY(F) \
(!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_QUALIFIERS_ONLY(F) ((F) & LOOKUP_PREFER_BOTH)
/* These flags are used by the conversion code.
CONV_IMPLICIT : Perform implicit conversions (standard and user-defined).
CONV_STATIC : Perform the explicit conversions for static_cast.
CONV_CONST : Perform the explicit conversions for const_cast.
CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast.
CONV_PRIVATE : Perform upcasts to private bases.
CONV_FORCE_TEMP : Require a new temporary when converting to the same
aggregate type. */
#define CONV_IMPLICIT 1
#define CONV_STATIC 2
#define CONV_CONST 4
#define CONV_REINTERPRET 8
#define CONV_PRIVATE 16
/* Bit 32 (CONV_NONCONVERTING) is retired but deliberately left
   unreused so old values keep their meaning.  */
/* #define CONV_NONCONVERTING 32 */
#define CONV_FORCE_TEMP 64
#define CONV_FOLD 128
#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET)
#define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP)
#define CONV_BACKEND_CONVERT (CONV_OLD_CONVERT | CONV_FOLD)
/* Used by build_expr_type_conversion to indicate which types are
acceptable as arguments to the expression under consideration. */
#define WANT_INT 1 /* integer types, including bool */
#define WANT_FLOAT 2 /* floating point types */
#define WANT_ENUM 4 /* enumerated types */
#define WANT_POINTER 8 /* pointer types */
#define WANT_NULL 16 /* null pointer constant */
#define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */
#define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX)
/* Used with comptypes, and related functions, to guide type
comparison. */
#define COMPARE_STRICT 0 /* Just check if the types are the
same. */
#define COMPARE_BASE 1 /* Check to see if the second type is
derived from the first. */
#define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in
reverse. */
#define COMPARE_REDECLARATION 4 /* The comparison is being done when
another declaration of an existing
entity is seen. */
#define COMPARE_STRUCTURAL 8 /* The comparison is intended to be
structural. The actual comparison
will be identical to
COMPARE_STRICT. */
/* Used with push_overloaded_decl. */
#define PUSH_GLOBAL 0 /* Push the DECL into namespace scope,
regardless of the current scope. */
#define PUSH_LOCAL 1 /* Push the DECL into the current
scope. */
#define PUSH_USING 2 /* We are pushing this DECL as the
result of a using declaration. */
/* Used with start_function. */
#define SF_DEFAULT 0 /* No flags. */
#define SF_PRE_PARSED 1 /* The function declaration has
already been parsed. */
#define SF_INCLASS_INLINE 2 /* The function is an inline, defined
in the class body. */
/* Used with start_decl's initialized parameter. */
#define SD_UNINITIALIZED 0
#define SD_INITIALIZED 1
#define SD_DEFAULTED 2
#define SD_DELETED 3
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2
is derived from TYPE1, or if TYPE2 is a pointer (reference) to a
class derived from the type pointed to (referred to) by TYPE1. */
#define same_or_base_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_BASE)
/* These macros are used to access a TEMPLATE_PARM_INDEX. */
#define TEMPLATE_PARM_INDEX_CAST(NODE) \
((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)
/* True if this template parameter is a parameter pack (e.g. T...).  */
#define TEMPLATE_PARM_PARAMETER_PACK(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE)))
/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes.
Each forwards to the corresponding TEMPLATE_PARM_* accessor on the
embedded TEMPLATE_PARM_INDEX.  */
#define TEMPLATE_TYPE_PARM_INDEX(NODE) \
(TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, \
TEMPLATE_TEMPLATE_PARM, \
BOUND_TEMPLATE_TEMPLATE_PARM)))
#define TEMPLATE_TYPE_IDX(NODE) \
(TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_LEVEL(NODE) \
(TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
(TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_DECL(NODE) \
(TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \
(TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE)))
/* For a C++17 class deduction placeholder, the template it represents. */
#define CLASS_PLACEHOLDER_TEMPLATE(NODE) \
(DECL_INITIAL (TYPE_NAME (TEMPLATE_TYPE_PARM_CHECK (NODE))))
/* Contexts in which auto deduction occurs. These flags are
used to control diagnostics in do_auto_deduction. */
enum auto_deduction_context
{
adc_unspecified, /* Not given */
adc_variable_type, /* Variable initializer deduction */
adc_return_type, /* Return type deduction */
adc_unify, /* Template argument deduction */
adc_requirement, /* Argument deduction constraint */
adc_decomp_type /* Decomposition declaration initializer deduction */
};
/* True if this type-parameter belongs to a class template, used by C++17
class template argument deduction. */
#define TEMPLATE_TYPE_PARM_FOR_CLASS(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_TYPE_PARM_CHECK (NODE)))
/* True iff this TEMPLATE_TYPE_PARM represents decltype(auto). */
#define AUTO_IS_DECLTYPE(NODE) \
(TYPE_LANG_FLAG_5 (TEMPLATE_TYPE_PARM_CHECK (NODE)))
/* These constants can be used as bit flags in the process of tree formatting.
TFF_PLAIN_IDENTIFIER: unqualified part of a name.
TFF_SCOPE: include the class and namespace scope of the name.
TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name.
TFF_DECL_SPECIFIERS: print decl-specifiers.
TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with
a class-key (resp. `enum').
TFF_RETURN_TYPE: include function return type.
TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values.
TFF_EXCEPTION_SPECIFICATION: show function exception specification.
TFF_TEMPLATE_HEADER: show the template<...> header in a
template-declaration.
TFF_TEMPLATE_NAME: show only template-name.
TFF_EXPR_IN_PARENS: parenthesize expressions.
TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments.
TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the
top-level entity.
TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments
identical to their defaults.
TFF_NO_TEMPLATE_BINDINGS: do not print information about the template
arguments for a function template specialization.
TFF_POINTER: we are printing a pointer type. */
/* Each nonzero flag occupies a distinct bit, so flags may be combined
   with bitwise OR.  */
#define TFF_PLAIN_IDENTIFIER (0)
#define TFF_SCOPE (1)
#define TFF_CHASE_TYPEDEF (1 << 1)
#define TFF_DECL_SPECIFIERS (1 << 2)
#define TFF_CLASS_KEY_OR_ENUM (1 << 3)
#define TFF_RETURN_TYPE (1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5)
#define TFF_EXCEPTION_SPECIFICATION (1 << 6)
#define TFF_TEMPLATE_HEADER (1 << 7)
#define TFF_TEMPLATE_NAME (1 << 8)
#define TFF_EXPR_IN_PARENS (1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS (1 << 10)
#define TFF_UNQUALIFIED_NAME (1 << 11)
#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS (1 << 12)
#define TFF_NO_TEMPLATE_BINDINGS (1 << 13)
#define TFF_POINTER (1 << 14)
/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM
   node.  For a BOUND_TEMPLATE_TEMPLATE_PARM, it is taken from the
   template info (TYPE_TI_TEMPLATE); otherwise from TYPE_NAME.  */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \
((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
? TYPE_TI_TEMPLATE (NODE) \
: TYPE_NAME (NODE))
/* in lex.c */
extern void init_reswords (void);
/* Bookkeeping for one operator: its identifier, printable name,
   mangled name and arity.  GTY-marked for the garbage collector.  */
typedef struct GTY(()) operator_name_info_t {
/* The IDENTIFIER_NODE for the operator. */
tree identifier;
/* The name of the operator. */
const char *name;
/* The mangled name of the operator. */
const char *mangled_name;
/* The arity of the operator. */
int arity;
} operator_name_info_t;
/* A mapping from tree codes to operator name information; the arrays
   are indexed by (int) tree code, sized MAX_TREE_CODES.  */
extern GTY(()) operator_name_info_t operator_name_info
[(int) MAX_TREE_CODES];
/* Similar, but for assignment operators.  */
extern GTY(()) operator_name_info_t assignment_operator_name_info
[(int) MAX_TREE_CODES];
/* A type-qualifier, or bitmask thereof, using the TYPE_QUAL
   constants.  */
typedef int cp_cv_quals;
/* Non-static member functions have an optional virt-specifier-seq.
   There is a VIRT_SPEC value for each virt-specifier.
   They can be combined by bitwise-or to form the complete set of
   virt-specifiers for a member function (each value is a distinct bit).  */
enum virt_specifier
{
VIRT_SPEC_UNSPECIFIED = 0x0,
VIRT_SPEC_FINAL = 0x1,
VIRT_SPEC_OVERRIDE = 0x2
};
/* A virt-specifier, or bitmask thereof, using the VIRT_SPEC
   constants.  */
typedef int cp_virt_specifiers;
/* Wherever there is a function-cv-qual, there could also be a ref-qualifier:
   [dcl.fct]
   The return type, the parameter-type-list, the ref-qualifier, and
   the cv-qualifier-seq, but not the default arguments or the exception
   specification, are part of the function type.
   REF_QUAL_NONE    Ordinary member function with no ref-qualifier
   REF_QUAL_LVALUE  Member function with the &-ref-qualifier
   REF_QUAL_RVALUE  Member function with the &&-ref-qualifier */
enum cp_ref_qualifier {
REF_QUAL_NONE = 0,
REF_QUAL_LVALUE = 1,
REF_QUAL_RVALUE = 2
};
/* A storage class.  */
enum cp_storage_class {
/* sc_none must be zero so that zeroing a cp_decl_specifier_seq
   sets the storage_class field to sc_none. */
sc_none = 0,
sc_auto,
sc_register,
sc_static,
sc_extern,
sc_mutable
};
/* An individual decl-specifier.  This is used to index the array of
   locations for the declspecs in struct cp_decl_specifier_seq
   below.  */
enum cp_decl_spec {
ds_first,
ds_signed = ds_first, /* ds_signed shares the value of ds_first.  */
ds_unsigned,
ds_short,
ds_long,
ds_const,
ds_volatile,
ds_restrict,
ds_inline,
ds_virtual,
ds_explicit,
ds_friend,
ds_typedef,
ds_alias,
ds_constexpr,
ds_complex,
ds_thread,
ds_type_spec,
ds_redefined_builtin_type_spec,
ds_attribute,
ds_std_attribute,
ds_storage_class,
ds_long_long,
ds_concept,
ds_last /* This enumerator must always be the last one. */
};
/* A decl-specifier-seq.  */
struct cp_decl_specifier_seq {
/* An array of locations for the declaration specifiers, indexed by
   enum cp_decl_spec above.  */
source_location locations[ds_last];
/* The primary type, if any, given by the decl-specifier-seq.
   Modifiers, like "short", "const", and "unsigned" are not
   reflected here. This field will be a TYPE, unless a typedef-name
   was used, in which case it will be a TYPE_DECL. */
tree type;
/* The attributes, if any, provided with the specifier sequence. */
tree attributes;
/* The C++11 attributes that follow the type specifier.  */
tree std_attributes;
/* If non-NULL, a built-in type that the user attempted to redefine
   to some other type. */
tree redefined_builtin_type;
/* The storage class specified -- or sc_none if no storage class was
   explicitly specified. */
cp_storage_class storage_class;
/* For the __intN declspec, this stores the index into the int_n_* arrays. */
int int_n_idx;
/* True iff TYPE_SPEC defines a class or enum. */
BOOL_BITFIELD type_definition_p : 1;
/* True iff multiple types were (erroneously) specified for this
   decl-specifier-seq. */
BOOL_BITFIELD multiple_types_p : 1;
/* True iff multiple storage classes were (erroneously) specified
   for this decl-specifier-seq or a combination of a storage class
   with a typedef specifier. */
BOOL_BITFIELD conflicting_specifiers_p : 1;
/* True iff at least one decl-specifier was found. */
BOOL_BITFIELD any_specifiers_p : 1;
/* True iff at least one type-specifier was found. */
BOOL_BITFIELD any_type_specifiers_p : 1;
/* True iff "int" was explicitly provided. */
BOOL_BITFIELD explicit_int_p : 1;
/* True iff "__intN" was explicitly provided. */
BOOL_BITFIELD explicit_intN_p : 1;
/* True iff "char" was explicitly provided. */
BOOL_BITFIELD explicit_char_p : 1;
/* True iff ds_thread is set for __thread, not thread_local. */
BOOL_BITFIELD gnu_thread_keyword_p : 1;
/* True iff the type is a decltype. */
BOOL_BITFIELD decltype_p : 1;
};
/* The various kinds of declarators.  */
enum cp_declarator_kind {
cdk_id, /* A declarator-id.  */
cdk_function, /* A function declarator.  */
cdk_array, /* An array declarator.  */
cdk_pointer, /* A pointer declarator.  */
cdk_reference, /* A reference declarator.  */
cdk_ptrmem, /* A pointer-to-member declarator.  */
cdk_decomp, /* A decomposition declaration.  */
cdk_error /* An erroneous declarator.  */
};
/* A declarator.  */
typedef struct cp_declarator cp_declarator;
typedef struct cp_parameter_declarator cp_parameter_declarator;
/* A parameter, before it has been semantically analyzed.  */
struct cp_parameter_declarator {
/* The next parameter, or NULL_TREE if none. */
cp_parameter_declarator *next;
/* The decl-specifiers-seq for the parameter. */
cp_decl_specifier_seq decl_specifiers;
/* The declarator for the parameter. */
cp_declarator *declarator;
/* The default-argument expression, or NULL_TREE, if none. */
tree default_argument;
/* True iff this is a template parameter pack. */
bool template_parameter_pack_p;
};
/* A declarator.  */
struct cp_declarator {
/* The kind of declarator. */
ENUM_BITFIELD (cp_declarator_kind) kind : 4;
/* Whether we parsed an ellipsis (`...') just before the declarator,
   to indicate this is a parameter pack. */
BOOL_BITFIELD parameter_pack_p : 1;
location_t id_loc; /* Currently only set for cdk_id, cdk_decomp and
cdk_function. */
/* GNU Attributes that apply to this declarator. If the declarator
   is a pointer or a reference, these attributes apply to the type
   pointed to. */
tree attributes;
/* Standard C++11 attributes that apply to this declarator. If the
   declarator is a pointer or a reference, these attributes apply
   to the pointer, rather than to the type pointed to. */
tree std_attributes;
/* For all but cdk_id, cdk_decomp and cdk_error, the contained declarator.
   For cdk_id, cdk_decomp and cdk_error, guaranteed to be NULL. */
cp_declarator *declarator;
/* Kind-specific data; the active member is selected by KIND.  */
union {
/* For identifiers. */
struct {
/* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
   *_TYPE) for this identifier. */
tree qualifying_scope;
/* The unqualified name of the entity -- an IDENTIFIER_NODE,
   BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */
tree unqualified_name;
/* If this is the name of a function, what kind of special
   function (if any). */
special_function_kind sfk;
} id;
/* For functions. */
struct {
/* The parameters to the function as a TREE_LIST of decl/default. */
tree parameters;
/* The cv-qualifiers for the function. */
cp_cv_quals qualifiers;
/* The virt-specifiers for the function. */
cp_virt_specifiers virt_specifiers;
/* The ref-qualifier for the function. */
cp_ref_qualifier ref_qualifier;
/* The transaction-safety qualifier for the function. */
tree tx_qualifier;
/* The exception-specification for the function. */
tree exception_specification;
/* The late-specified return type, if any. */
tree late_return_type;
/* The trailing requires-clause, if any. */
tree requires_clause;
} function;
/* For arrays. */
struct {
/* The bounds to the array. */
tree bounds;
} array;
/* For cdk_pointer and cdk_ptrmem. */
struct {
/* The cv-qualifiers for the pointer. */
cp_cv_quals qualifiers;
/* For cdk_ptrmem, the class type containing the member. */
tree class_type;
} pointer;
/* For cdk_reference */
struct {
/* The cv-qualifiers for the reference. These qualifiers are
   only used to diagnose ill-formed code. */
cp_cv_quals qualifiers;
/* Whether this is an rvalue reference */
bool rvalue_ref;
} reference;
} u;
};
/* A level of template instantiation.  Levels form a chain via NEXT
   (the GTY chain_next marker lets the GC walk it).  */
struct GTY((chain_next ("%h.next"))) tinst_level {
/* The immediately deeper level in the chain. */
struct tinst_level *next;
/* The original node. Can be either a DECL (for a function or static
   data member) or a TYPE (for a class), depending on what we were
   asked to instantiate. */
tree decl;
/* The location where the template is instantiated. */
location_t locus;
/* errorcount+sorrycount when we pushed this level. */
int errors;
/* True if the location is in a system header. */
bool in_system_header_p;
};
/* Query whether a decl-specifier-seq contains the given decl-specifier.  */
bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec);
/* Return the type of the `this' parameter of FNTYPE, i.e. the first
   entry produced by iterating FNTYPE's argument list.  FNTYPE must be
   a METHOD_TYPE.  */
inline tree
type_of_this_parm (const_tree fntype)
{
  gcc_assert (TREE_CODE (fntype) == METHOD_TYPE);
  function_args_iterator args;
  function_args_iter_init (&args, fntype);
  return function_args_iter_cond (&args);
}
/* Return the class of the `this' parameter of FNTYPE, i.e. the type
   that `this' points at.  */
inline tree
class_of_this_parm (const_tree fntype)
{
  tree this_type = type_of_this_parm (fntype);
  return TREE_TYPE (this_type);
}
/* True iff T is a variable template declaration: a primary
   TEMPLATE_DECL whose result is a VAR_DECL.  */
inline bool
variable_template_p (tree t)
{
  if (TREE_CODE (t) != TEMPLATE_DECL || !PRIMARY_TEMPLATE_P (t))
    return false;
  tree result = DECL_TEMPLATE_RESULT (t);
  return result && VAR_P (result);
}
/* True iff T is a variable concept definition.  That is, T is
   a variable template declared with the concept specifier.  */
inline bool
variable_concept_p (tree t)
{
  if (TREE_CODE (t) != TEMPLATE_DECL)
    return false;
  tree result = DECL_TEMPLATE_RESULT (t);
  return result && VAR_P (result) && DECL_DECLARED_CONCEPT_P (result);
}
/* True iff T is a concept definition.  That is, T is a variable or
   function template declared with the concept specifier.  */
inline bool
concept_template_p (tree t)
{
  if (TREE_CODE (t) != TEMPLATE_DECL)
    return false;
  tree result = DECL_TEMPLATE_RESULT (t);
  return result
	 && VAR_OR_FUNCTION_DECL_P (result)
	 && DECL_DECLARED_CONCEPT_P (result);
}
/* The parameter list used for a function with no parameters,
   e.g. "int f(void)".  */
extern cp_parameter_declarator *no_parameters;
/* in call.c */
extern bool check_dtor_name (tree, tree);
/* NOTE(review): presumably tests for builtins with special varargs
   handling -- confirm in call.c.  Declared `extern' for consistency
   with the surrounding declarations (linkage is unchanged).  */
extern int magic_varargs_p (tree);
/* NOTE(review): most declarations below take a trailing
   tsubst_flags_t, which presumably controls diagnostics during
   template substitution -- confirm against call.c.  */
extern tree build_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_addr_func (tree, tsubst_flags_t);
extern void set_flags_from_callee (tree);
extern tree build_call_a (tree, int, tree*);
extern tree build_call_n (tree, int, ...);
extern bool null_ptr_cst_p (tree);
extern bool null_member_pointer_value_p (tree);
extern bool sufficient_parms_p (const_tree);
extern tree type_decays_to (tree);
extern tree extract_call_expr (tree);
extern tree build_user_type_conversion (tree, tree, int,
tsubst_flags_t);
extern tree build_new_function_call (tree, vec<tree, va_gc> **, bool,
tsubst_flags_t);
extern tree build_operator_new_call (tree, vec<tree, va_gc> **, tree *,
tree *, tree, tree, tree *,
tsubst_flags_t);
extern tree build_new_method_call (tree, tree, vec<tree, va_gc> **,
tree, int, tree *,
tsubst_flags_t);
extern tree build_special_member_call (tree, tree, vec<tree, va_gc> **,
tree, int, tsubst_flags_t);
extern tree build_new_op (location_t, enum tree_code,
int, tree, tree, tree, tree *,
tsubst_flags_t);
extern tree build_op_call (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool aligned_allocation_fn_p (tree);
extern bool usual_deallocation_fn_p (tree);
extern tree build_op_delete_call (enum tree_code, tree, tree,
bool, tree, tree,
tsubst_flags_t);
extern bool can_convert (tree, tree, tsubst_flags_t);
extern bool can_convert_standard (tree, tree, tsubst_flags_t);
extern bool can_convert_arg (tree, tree, tree, int,
tsubst_flags_t);
extern bool can_convert_arg_bad (tree, tree, tree, int,
tsubst_flags_t);
extern bool enforce_access (tree, tree, tree,
tsubst_flags_t);
extern void push_defarg_context (tree);
extern void pop_defarg_context (void);
extern tree convert_default_arg (tree, tree, tree, int,
tsubst_flags_t);
extern tree convert_arg_to_ellipsis (tree, tsubst_flags_t);
extern tree build_x_va_arg (source_location, tree, tree);
extern tree cxx_type_promotes_to (tree);
extern tree type_passed_as (tree);
extern tree convert_for_arg_passing (tree, tree, tsubst_flags_t);
extern bool is_properly_derived_from (tree, tree);
extern tree initialize_reference (tree, tree, int,
tsubst_flags_t);
extern tree extend_ref_init_temps (tree, tree, vec<tree, va_gc>**);
extern tree make_temporary_var_for_ref_to_temp (tree, tree);
extern bool type_has_extended_temps (tree);
extern tree strip_top_quals (tree);
extern bool reference_related_p (tree, tree);
extern int remaining_arguments (tree);
extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t);
extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int);
extern tree build_integral_nontype_arg_conv (tree, tree, tsubst_flags_t);
extern tree perform_direct_initialization_if_possible (tree, tree, bool,
tsubst_flags_t);
extern tree in_charge_arg_for_name (tree);
extern tree build_cxx_call (tree, int, tree *,
tsubst_flags_t);
extern bool is_std_init_list (tree);
extern bool is_list_ctor (tree);
extern void validate_conversion_obstack (void);
extern void mark_versions_used (tree);
extern tree get_function_version_dispatcher (tree);
/* in class.c */
extern tree build_vfield_ref (tree, tree);
/* FALSE_STMT defaults to void_node, so most callers need only pass
   the true branch.  */
extern tree build_if_in_charge (tree true_stmt, tree false_stmt = void_node);
extern tree build_base_path (enum tree_code, tree,
tree, int, tsubst_flags_t);
extern tree convert_to_base (tree, tree, bool, bool,
tsubst_flags_t);
extern tree convert_to_base_statically (tree, tree);
extern tree build_vtbl_ref (tree, tree);
extern tree build_vfn_ref (tree, tree);
extern tree get_vtable_decl (tree, int);
extern void resort_type_method_vec (void *, void *,
gt_pointer_operator, void *);
extern bool add_method (tree, tree, tree);
extern tree declared_access (tree);
extern tree currently_open_class (tree);
extern tree currently_open_derived_class (tree);
extern tree outermost_open_class (void);
extern tree current_nonlambda_class_type (void);
extern tree finish_struct (tree, tree);
extern void finish_struct_1 (tree);
extern int resolves_to_fixed_type_p (tree, int *);
extern void init_class_processing (void);
extern int is_empty_class (tree);
extern bool is_really_empty_class (tree);
extern void pushclass (tree);
extern void popclass (void);
extern void push_nested_class (tree);
extern void pop_nested_class (void);
extern int current_lang_depth (void);
extern void push_lang_context (tree);
extern void pop_lang_context (void);
extern tree instantiate_type (tree, tree, tsubst_flags_t);
extern void print_class_statistics (void);
extern void build_self_reference (void);
extern int same_signature_p (const_tree, const_tree);
extern void maybe_add_class_template_decl_list (tree, tree, int);
extern void unreverse_member_declarations (tree);
extern void invalidate_class_lookup_cache (void);
extern void maybe_note_name_used_in_class (tree, tree);
extern void note_name_declared_in_class (tree, tree);
extern tree get_vtbl_decl_for_binfo (tree);
extern bool vptr_via_virtual_p (tree);
extern void debug_class (tree);
extern void debug_thunks (tree);
extern void set_linkage_according_to_type (tree, tree);
extern void determine_key_method (tree);
extern void check_for_override (tree, tree);
extern void push_class_stack (void);
extern void pop_class_stack (void);
extern bool default_ctor_p (tree);
extern bool type_has_user_nondefault_constructor (tree);
extern tree in_class_defaulted_default_constructor (tree);
extern bool user_provided_p (tree);
extern bool type_has_user_provided_constructor (tree);
extern bool type_has_non_user_provided_default_constructor (tree);
extern bool vbase_has_user_provided_move_assign (tree);
extern tree default_init_uninitialized_part (tree);
extern bool trivial_default_constructor_is_constexpr (tree);
extern bool type_has_constexpr_default_constructor (tree);
extern bool type_has_virtual_destructor (tree);
extern bool type_has_move_constructor (tree);
extern bool type_has_move_assign (tree);
extern bool type_has_user_declared_move_constructor (tree);
/* Space before the parameter list, matching GNU style used by every
   other declaration in this header.  */
extern bool type_has_user_declared_move_assign (tree);
/* Further entry points declared in class.c (continuing the group
   above).  */
extern bool type_build_ctor_call (tree);
extern bool type_build_dtor_call (tree);
extern void explain_non_literal_class (tree);
extern void inherit_targ_abi_tags (tree);
extern void defaulted_late_check (tree);
extern bool defaultable_fn_check (tree);
extern void check_abi_tags (tree);
extern tree missing_abi_tags (tree);
extern void fixup_type_variants (tree);
extern void fixup_attribute_variants (tree);
extern tree* decl_cloned_function_p (const_tree, bool);
extern void clone_function_decl (tree, int);
extern void adjust_clone_args (tree);
extern void deduce_noexcept_on_destructor (tree);
extern void insert_late_enum_def_into_classtype_sorted_fields (tree, tree);
extern bool uniquely_derived_from_p (tree, tree);
extern bool publicly_uniquely_derived_p (tree, tree);
extern tree common_enclosing_class (tree, tree);
/* in cvt.c */
extern tree convert_to_reference (tree, tree, int, int, tree,
tsubst_flags_t);
extern tree convert_from_reference (tree);
extern tree force_rvalue (tree, tsubst_flags_t);
extern tree ocp_convert (tree, tree, int, int,
tsubst_flags_t);
extern tree cp_convert (tree, tree, tsubst_flags_t);
extern tree cp_convert_and_check (tree, tree, tsubst_flags_t);
extern tree cp_fold_convert (tree, tree);
extern tree cp_get_callee (tree);
extern tree cp_get_callee_fndecl (tree);
extern tree cp_get_fndecl_from_callee (tree);
extern tree convert_to_void (tree, impl_conv_void,
tsubst_flags_t);
extern tree convert_force (tree, tree, int,
tsubst_flags_t);
extern tree build_expr_type_conversion (int, tree, bool);
extern tree type_promotes_to (tree);
extern tree perform_qualification_conversions (tree, tree);
extern bool tx_safe_fn_type_p (tree);
extern tree tx_unsafe_fn_variant (tree);
extern bool fnptr_conv_p (tree, tree);
extern tree strip_fnptr_conv (tree);
/* in name-lookup.c */
extern tree pushdecl (tree);
extern tree pushdecl_maybe_friend (tree, bool);
extern void maybe_push_cleanup_level (tree);
extern tree pushtag (tree, tree, tag_scope);
extern tree make_anon_name (void);
extern tree pushdecl_top_level_maybe_friend (tree, bool);
extern tree pushdecl_top_level_and_finish (tree, tree);
extern tree check_for_out_of_scope_variable (tree);
/* Two overloads of dump for a cp_binding_level: by reference and by
   pointer.  */
extern void dump (cp_binding_level &ref);
extern void dump (cp_binding_level *ptr);
extern void print_other_binding_stack (cp_binding_level *);
extern tree maybe_push_decl (tree);
extern tree current_decl_namespace (void);
/* in decl.c */
extern tree poplevel (int, int, int);
extern void cxx_init_decl_processing (void);
enum cp_tree_node_structure_enum cp_tree_node_structure
(union lang_tree_node *);
extern void finish_scope (void);
extern void push_switch (tree);
extern void pop_switch (void);
extern tree make_lambda_name (void);
extern int decls_match (tree, tree);
extern tree duplicate_decls (tree, tree, bool);
extern tree declare_local_label (tree);
extern tree define_label (location_t, tree);
extern void check_goto (tree);
extern bool check_omp_return (void);
extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t);
extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t);
extern tree build_library_fn_ptr (const char *, tree, int);
extern tree build_cp_library_fn_ptr (const char *, tree, int);
extern tree push_library_fn (tree, tree, tree, int);
extern tree push_void_library_fn (tree, tree, int);
extern tree push_throw_library_fn (tree, tree);
extern void warn_misplaced_attr_for_class_type (source_location location,
tree class_type);
extern tree check_tag_decl (cp_decl_specifier_seq *, bool);
extern tree shadow_tag (cp_decl_specifier_seq *);
extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool);
extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *);
extern void start_decl_1 (tree, bool);
extern bool check_array_initializer (tree, tree, tree);
extern void cp_finish_decl (tree, tree, bool, tree, int);
extern tree lookup_decomp_type (tree);
extern void cp_finish_decomp (tree, tree, unsigned int);
extern int cp_complete_array_type (tree *, tree, bool);
extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t);
extern tree build_ptrmemfunc_type (tree);
extern tree build_ptrmem_type (tree, tree);
/* the grokdeclarator prototype is in decl.h */
extern tree build_this_parm (tree, cp_cv_quals);
extern tree grokparms (tree, tree *);
extern int copy_fn_p (const_tree);
extern bool move_fn_p (const_tree);
extern bool move_signature_fn_p (const_tree);
extern tree get_scope_of_declarator (const cp_declarator *);
extern void grok_special_member_properties (tree);
extern int grok_ctor_properties (const_tree, const_tree);
extern bool grok_op_properties (tree, bool);
extern tree xref_tag (enum tag_types, tree, tag_scope, bool);
extern tree xref_tag_from_type (tree, tree, tag_scope);
extern void xref_basetypes (tree, tree);
extern tree start_enum (tree, tree, tree, tree, bool, bool *);
extern void finish_enum_value_list (tree);
extern void finish_enum (tree);
extern void build_enumerator (tree, tree, tree, tree, location_t);
extern tree lookup_enumerator (tree, tree);
extern bool start_preparsed_function (tree, tree, int);
extern bool start_function (cp_decl_specifier_seq *,
const cp_declarator *, tree);
extern tree begin_function_body (void);
extern void finish_function_body (tree);
extern tree outer_curly_brace_block (tree);
extern tree finish_function (int);
extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree);
extern void maybe_register_incomplete_var (tree);
extern void maybe_commonize_var (tree);
extern void complete_vars (tree);
extern tree static_fn_type (tree);
extern void revert_static_member_fn (tree);
extern void fixup_anonymous_aggr (tree);
extern tree compute_array_index_type (tree, tree, tsubst_flags_t);
extern tree check_default_argument (tree, tree, tsubst_flags_t);
/* Callback type used by walk_namespaces below.  */
typedef int (*walk_namespaces_fn) (tree, void *);
extern int walk_namespaces (walk_namespaces_fn,
void *);
extern int wrapup_globals_for_namespace (tree, void *);
extern int diagnose_inline_vars_for_namespace (tree, void *);
extern tree create_implicit_typedef (tree, tree);
extern int local_variable_p (const_tree);
extern tree register_dtor_fn (tree);
extern tmpl_spec_kind current_tmpl_spec_kind (int);
extern tree cp_fname_init (const char *, tree *);
extern tree cxx_builtin_function (tree decl);
extern tree cxx_builtin_function_ext_scope (tree decl);
extern tree check_elaborated_type_specifier (enum tag_types, tree, bool);
extern void warn_extern_redeclared_static (tree, tree);
extern tree cxx_comdat_group (tree);
extern bool cp_missing_noreturn_ok_p (tree);
extern bool is_direct_enum_init (tree, tree);
extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *);
extern tree check_var_type (tree, tree);
extern tree reshape_init (tree, tree, tsubst_flags_t);
extern tree next_initializable_field (tree);
extern tree fndecl_declared_return_type (tree);
extern bool undeduced_auto_decl (tree);
extern bool require_deduced_type (tree, tsubst_flags_t = tf_warning_or_error);
extern tree finish_case_label (location_t, tree, tree);
extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t);
/* in decl2.c */
extern void note_mangling_alias (tree, tree);
extern void generate_mangling_aliases (void);
extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier);
extern tree build_pointer_ptrmemfn_type (tree);
extern tree change_return_type (tree, tree);
extern void maybe_retrofit_in_chrg (tree);
extern void maybe_make_one_only (tree);
extern bool vague_linkage_p (tree);
extern void grokclassfn (tree, tree,
enum overload_flags);
extern tree grok_array_decl (location_t, tree, tree, bool);
extern tree delete_sanity (tree, tree, bool, int, tsubst_flags_t);
extern tree check_classfn (tree, tree, tree);
extern void check_member_template (tree);
extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, bool, tree, tree);
extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, tree);
extern bool any_dependent_type_attributes_p (tree);
extern tree cp_reconstruct_complex_type (tree, tree);
extern bool attributes_naming_typedef_ok (tree);
extern void cplus_decl_attributes (tree *, tree, int);
extern void finish_anon_union (tree);
extern void cxx_post_compilation_parsing_cleanups (void);
extern tree coerce_new_type (tree);
extern tree coerce_delete_type (tree);
extern void comdat_linkage (tree);
extern void determine_visibility (tree);
extern void constrain_class_visibility (tree);
extern void reset_type_linkage (tree);
extern void tentative_decl_linkage (tree);
extern void import_export_decl (tree);
extern tree build_cleanup (tree);
extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool decl_defined_p (tree);
extern bool decl_constant_var_p (tree);
extern bool decl_maybe_constant_var_p (tree);
extern void no_linkage_error (tree);
extern void check_default_args (tree);
/* Two overloads: the second takes explicit substitution flags.  */
extern bool mark_used (tree);
extern bool mark_used (tree, tsubst_flags_t);
extern void finish_static_data_member_decl (tree, tree, bool, tree, int);
extern tree cp_build_parm_decl (tree, tree);
extern tree get_guard (tree);
extern tree get_guard_cond (tree, bool);
extern tree set_guard (tree);
extern tree get_tls_wrapper_fn (tree);
extern void mark_needed (tree);
extern bool decl_needed_p (tree);
extern void note_vague_linkage_fn (tree);
extern void note_variable_template_instantiation (tree);
extern tree build_artificial_parm (tree, tree);
extern bool possibly_inlined_p (tree);
extern int parm_index (tree);
extern tree vtv_start_verification_constructor_init_function (void);
extern tree vtv_finish_verification_constructor_init_function (tree);
extern bool cp_omp_mappable_type (tree);
/* in error.c */
extern const char *type_as_string (tree, int);
extern const char *type_as_string_translate (tree, int);
extern const char *decl_as_string (tree, int);
extern const char *decl_as_string_translate (tree, int);
extern const char *decl_as_dwarf_string (tree, int);
extern const char *expr_as_string (tree, int);
extern const char *lang_decl_name (tree, int, bool);
extern const char *lang_decl_dwarf_name (tree, int, bool);
extern const char *language_to_string (enum languages);
extern const char *class_key_or_enum_as_string (tree);
extern void maybe_warn_variadic_templates (void);
extern void maybe_warn_cpp0x (cpp0x_warn_str str);
/* Format-string checking on argument 3 (values start at 4), per
   ATTRIBUTE_GCC_DIAG.  */
extern bool pedwarn_cxx98 (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern location_t location_of (tree);
extern void qualified_name_lookup_error (tree, tree, tree,
location_t);
/* in except.c */
extern void init_exception_processing (void);
extern tree expand_start_catch_block (tree);
extern void expand_end_catch_block (void);
extern tree build_exc_ptr (void);
extern tree build_throw (tree);
extern int nothrow_libfn_p (const_tree);
extern void check_handlers (tree);
extern tree finish_noexcept_expr (tree, tsubst_flags_t);
extern bool expr_noexcept_p (tree, tsubst_flags_t);
extern void perform_deferred_noexcept_checks (void);
extern bool nothrow_spec_p (const_tree);
extern bool type_noexcept_p (const_tree);
extern bool type_throw_all_p (const_tree);
extern tree build_noexcept_spec (tree, int);
extern void choose_personality_routine (enum languages);
/* Space after the comma in the parameter list, matching the style of
   every other declaration in this header.  */
extern tree build_must_not_throw_expr (tree, tree);
extern tree eh_type_info (tree);
extern tree begin_eh_spec_block (void);
extern void finish_eh_spec_block (tree, tree);
extern tree build_eh_type_type (tree);
extern tree cp_protect_cleanup_actions (void);
extern tree create_try_catch_expr (tree, tree);
/* in expr.c */
extern tree cplus_expand_constant (tree);
/* The defaulted location and flag let most callers use the
   one-argument form.  */
extern tree mark_rvalue_use (tree,
location_t = UNKNOWN_LOCATION,
bool = true);
extern tree mark_lvalue_use (tree);
extern tree mark_type_use (tree);
extern void mark_exp_read (tree);
/* in friend.c */
extern int is_friend (tree, tree);
extern void make_friend_class (tree, tree, bool);
extern void add_friend (tree, tree, bool);
extern tree do_friend (tree, tree, tree, tree, enum overload_flags, bool);
extern void set_global_friend (tree);
extern bool is_global_friend (tree);
/* in init.c */
extern tree expand_member_init (tree);
extern void emit_mem_initializers (tree);
extern tree build_aggr_init (tree, tree, int,
tsubst_flags_t);
extern int is_class_type (tree, int);
extern tree get_type_value (tree);
extern tree build_zero_init (tree, tree, bool);
extern tree build_value_init (tree, tsubst_flags_t);
extern tree build_value_init_noctor (tree, tsubst_flags_t);
extern tree get_nsdmi (tree, bool);
extern tree build_offset_ref (tree, tree, bool,
tsubst_flags_t);
extern tree throw_bad_array_new_length (void);
extern bool type_has_new_extended_alignment (tree);
extern unsigned malloc_alignment (void);
extern tree build_new (vec<tree, va_gc> **, tree, tree,
vec<tree, va_gc> **, int,
tsubst_flags_t);
extern tree get_temp_regvar (tree, tree);
extern tree build_vec_init (tree, tree, tree, bool, int,
tsubst_flags_t);
extern tree build_delete (tree, tree,
special_function_kind,
int, int, tsubst_flags_t);
extern void push_base_cleanups (void);
extern tree build_vec_delete (tree, tree,
special_function_kind, int,
tsubst_flags_t);
extern tree create_temporary_var (tree);
extern void initialize_vtbl_ptrs (tree);
extern tree scalar_constant_value (tree);
extern tree decl_really_constant_value (tree);
extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool);
extern tree build_vtbl_address (tree);
extern bool maybe_reject_flexarray_init (tree, tree);
/* in lex.c */
extern void cxx_dup_lang_specific_decl (tree);
extern void yyungetc (int, int);
extern tree unqualified_name_lookup_error (tree,
location_t = UNKNOWN_LOCATION);
extern tree unqualified_fn_lookup_error (cp_expr);
extern tree build_lang_decl (enum tree_code, tree, tree);
extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree);
extern void retrofit_lang_decl (tree);
extern tree copy_decl (tree);
extern tree copy_type (tree);
extern tree cxx_make_type (enum tree_code);
extern tree make_class_type (enum tree_code);
extern bool cxx_init (void);
extern void cxx_finish (void);
extern bool in_main_input_context (void);
/* in method.c */
extern void init_method (void);
extern tree make_thunk (tree, bool, tree, tree);
extern void finish_thunk (tree);
extern void use_thunk (tree, bool);
extern bool trivial_fn_p (tree);
extern tree forward_parm (tree);
extern bool is_trivially_xible (enum tree_code, tree, tree);
extern tree get_defaulted_eh_spec (tree);
extern tree unevaluated_noexcept_spec (void);
extern void after_nsdmi_defaulted_late_checks (tree);
extern bool maybe_explain_implicit_delete (tree);
extern void explain_implicit_non_constexpr (tree);
extern void deduce_inheriting_ctor (tree);
extern void synthesize_method (tree);
extern tree lazily_declare_fn (special_function_kind,
tree);
extern tree skip_artificial_parms_for (const_tree, tree);
extern int num_artificial_parms_for (const_tree);
extern tree make_alias_for (tree, tree);
extern tree get_copy_ctor (tree, tsubst_flags_t);
extern tree get_copy_assign (tree);
extern tree get_default_ctor (tree);
extern tree get_dtor (tree, tsubst_flags_t);
extern tree strip_inheriting_ctors (tree);
extern tree inherited_ctor_binfo (tree);
extern bool ctor_omit_inherited_parms (tree);
extern tree locate_ctor (tree);
extern tree implicitly_declare_fn (special_function_kind, tree,
bool, tree, tree);
/* In optimize.c */
extern bool maybe_clone_body (tree);
/* In parser.c */
extern tree cp_convert_range_for (tree, tree, tree, tree, unsigned int, bool);
extern bool parsing_nsdmi (void);
extern bool parsing_default_capturing_generic_lambda (void);
extern void inject_this_parameter (tree, cp_cv_quals);
/* in pt.c */
extern bool check_template_shadow (tree);
extern tree get_innermost_template_args (tree, int);
extern void maybe_begin_member_template_processing (tree);
extern void maybe_end_member_template_processing (void);
extern tree finish_member_template_decl (tree);
extern void begin_template_parm_list (void);
extern bool begin_specialization (void);
extern void reset_specialization (void);
extern void end_specialization (void);
extern void begin_explicit_instantiation (void);
extern void end_explicit_instantiation (void);
extern void check_unqualified_spec_or_inst (tree, location_t);
extern tree check_explicit_specialization (tree, tree, int, int);
extern int num_template_headers_for_class (tree);
extern void check_template_variable (tree);
extern tree make_auto (void);
extern tree make_decltype_auto (void);
extern tree make_template_placeholder (tree);
extern bool template_placeholder_p (tree);
extern tree do_auto_deduction (tree, tree, tree);
extern tree do_auto_deduction (tree, tree, tree,
tsubst_flags_t,
auto_deduction_context,
tree = NULL_TREE,
int = LOOKUP_NORMAL);
extern tree type_uses_auto (tree);
extern tree type_uses_auto_or_concept (tree);
extern void append_type_to_template_for_access_check (tree, tree, tree,
location_t);
extern tree convert_generic_types_to_packs (tree, int, int);
extern tree splice_late_return_type (tree, tree);
extern bool is_auto (const_tree);
extern bool is_auto_or_concept (const_tree);
extern tree process_template_parm (tree, location_t, tree,
bool, bool);
extern tree end_template_parm_list (tree);
extern void end_template_parm_list (void);
extern void end_template_decl (void);
extern tree maybe_update_decl_type (tree, tree);
extern bool check_default_tmpl_args (tree, tree, bool, bool, int);
extern tree push_template_decl (tree);
extern tree push_template_decl_real (tree, bool);
extern tree add_inherited_template_parms (tree, tree);
extern bool redeclare_class_template (tree, tree, tree);
extern tree lookup_template_class (tree, tree, tree, tree,
int, tsubst_flags_t);
extern tree lookup_template_function (tree, tree);
extern tree lookup_template_variable (tree, tree);
extern int uses_template_parms (tree);
extern bool uses_template_parms_level (tree, int);
extern bool in_template_function (void);
extern tree instantiate_class_template (tree);
extern tree instantiate_template (tree, tree, tsubst_flags_t);
extern tree fn_type_unification (tree, tree, tree,
const tree *, unsigned int,
tree, unification_kind_t, int,
bool, bool);
extern void mark_decl_instantiated (tree, int);
extern int more_specialized_fn (tree, tree, int);
extern void do_decl_instantiation (tree, tree);
extern void do_type_instantiation (tree, tree, tsubst_flags_t);
extern bool always_instantiate_p (tree);
extern void maybe_instantiate_noexcept (tree);
extern tree instantiate_decl (tree, bool, bool);
extern int comp_template_parms (const_tree, const_tree);
extern bool uses_parameter_packs (tree);
extern bool template_parameter_pack_p (const_tree);
extern bool function_parameter_pack_p (const_tree);
extern bool function_parameter_expanded_from_pack_p (tree, tree);
extern tree make_pack_expansion (tree);
extern bool check_for_bare_parameter_packs (tree);
extern tree build_template_info (tree, tree);
extern tree get_template_info (const_tree);
extern vec<qualified_typedef_usage_t, va_gc> *get_types_needing_access_check (tree);
extern int template_class_depth (tree);
extern int is_specialization_of (tree, tree);
extern bool is_specialization_of_friend (tree, tree);
extern tree get_pattern_parm (tree, tree);
extern int comp_template_args (tree, tree, tree * = NULL,
tree * = NULL, bool = false);
extern int template_args_equal (tree, tree, bool = false);
extern tree maybe_process_partial_specialization (tree);
extern tree most_specialized_instantiation (tree);
extern void print_candidates (tree);
extern void instantiate_pending_templates (int);
extern tree tsubst_default_argument (tree, tree, tree,
tsubst_flags_t);
extern tree tsubst (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t,
tree, bool, bool);
extern tree tsubst_expr (tree, tree, tsubst_flags_t,
tree, bool);
extern tree tsubst_pack_expansion (tree, tree, tsubst_flags_t, tree);
extern tree most_general_template (tree);
extern tree get_mostly_instantiated_function_type (tree);
extern bool problematic_instantiation_changed (void);
extern void record_last_problematic_instantiation (void);
extern struct tinst_level *current_instantiation(void);
extern bool instantiating_current_function_p (void);
extern tree maybe_get_template_decl_from_type_decl (tree);
extern int processing_template_parmlist;
extern bool dependent_type_p (tree);
extern bool dependent_scope_p (tree);
extern bool any_dependent_template_arguments_p (const_tree);
extern bool dependent_template_p (tree);
extern bool dependent_template_id_p (tree, tree);
extern bool type_dependent_expression_p (tree);
extern bool type_dependent_object_expression_p (tree);
extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *);
extern bool any_type_dependent_elements_p (const_tree);
extern bool type_dependent_expression_p_push (tree);
extern bool value_dependent_expression_p (tree);
extern bool instantiation_dependent_expression_p (tree);
extern bool instantiation_dependent_uneval_expression_p (tree);
extern bool any_value_dependent_elements_p (const_tree);
extern bool dependent_omp_for_p (tree, tree, tree, tree);
extern tree resolve_typename_type (tree, bool);
extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
extern void make_args_non_dependent (vec<tree, va_gc> *);
extern bool reregister_specialization (tree, tree, tree);
extern tree instantiate_non_dependent_expr (tree);
extern tree instantiate_non_dependent_expr_sfinae (tree, tsubst_flags_t);
extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t);
extern tree instantiate_non_dependent_or_null (tree);
extern bool variable_template_specialization_p (tree);
extern bool alias_type_or_template_p (tree);
extern bool alias_template_specialization_p (const_tree);
extern bool dependent_alias_template_spec_p (const_tree);
extern bool explicit_class_specialization_p (tree);
extern bool push_tinst_level (tree);
extern bool push_tinst_level_loc (tree, location_t);
extern void pop_tinst_level (void);
extern struct tinst_level *outermost_tinst_level(void);
extern void init_template_processing (void);
extern void print_template_statistics (void);
bool template_template_parameter_p (const_tree);
bool template_type_parameter_p (const_tree);
extern bool primary_template_instantiation_p (const_tree);
extern tree get_primary_template_innermost_parameters (const_tree);
extern tree get_template_parms_at_level (tree, int);
extern tree get_template_innermost_arguments (const_tree);
extern tree get_template_argument_pack_elems (const_tree);
extern tree get_function_template_decl (const_tree);
extern tree resolve_nondeduced_context (tree, tsubst_flags_t);
extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val);
extern tree coerce_template_parms (tree, tree, tree);
extern tree coerce_template_parms (tree, tree, tree, tsubst_flags_t);
extern void register_local_specialization (tree, tree);
extern tree retrieve_local_specialization (tree);
extern tree extract_fnparm_pack (tree, tree *);
extern tree template_parm_to_arg (tree);
extern tree dguide_name (tree);
extern bool dguide_name_p (tree);
extern bool deduction_guide_p (const_tree);
extern bool copy_guide_p (const_tree);
extern bool template_guide_p (const_tree);
/* in repo.c */
extern void init_repo (void);
extern int repo_emit_p (tree);
extern bool repo_export_class_p (const_tree);
extern void finish_repo (void);
/* in rtti.c */
/* A vector of all tinfo decls that haven't been emitted yet. */
extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls;
extern void init_rtti_processing (void);
extern tree build_typeid (tree, tsubst_flags_t);
extern tree get_tinfo_decl (tree);
extern tree get_typeid (tree, tsubst_flags_t);
extern tree build_headof (tree);
extern tree build_dynamic_cast (tree, tree, tsubst_flags_t);
extern void emit_support_tinfos (void);
extern bool emit_tinfo_decl (tree);
/* in search.c */
extern bool accessible_base_p (tree, tree, bool);
extern tree lookup_base (tree, tree, base_access,
base_kind *, tsubst_flags_t);
extern tree dcast_base_hint (tree, tree);
extern int accessible_p (tree, tree, bool);
extern int accessible_in_template_p (tree, tree);
extern tree lookup_field_1 (tree, tree, bool);
extern tree lookup_field (tree, tree, int, bool);
extern int lookup_fnfields_1 (tree, tree);
extern tree lookup_fnfields_slot (tree, tree);
extern tree lookup_fnfields_slot_nolazy (tree, tree);
extern int class_method_index_for_fn (tree, tree);
extern tree lookup_fnfields (tree, tree, int);
extern tree lookup_member (tree, tree, int, bool,
tsubst_flags_t);
extern tree lookup_member_fuzzy (tree, tree, bool);
extern int look_for_overrides (tree, tree);
extern void get_pure_virtuals (tree);
extern void maybe_suppress_debug_info (tree);
extern void note_debug_info_needed (tree);
extern void print_search_statistics (void);
extern void reinit_search_statistics (void);
extern tree current_scope (void);
extern int at_function_scope_p (void);
extern bool at_class_scope_p (void);
extern bool at_namespace_scope_p (void);
extern tree context_for_name_lookup (tree);
extern tree lookup_conversions (tree);
extern tree binfo_from_vbase (tree);
extern tree binfo_for_vbase (tree, tree);
extern tree look_for_overrides_here (tree, tree);
/* Magic value a dfs_walk_all/dfs_walk_once callback (declared just
   below) can return; being (tree)1 it can never collide with a real
   tree node pointer.  Presumably it tells the walker to skip the
   bases of the current binfo -- confirm in search.c.  */
#define dfs_skip_bases ((tree)1)
extern tree dfs_walk_all (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree dfs_walk_once (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree binfo_via_virtual (tree, tree);
extern bool binfo_direct_p (tree);
extern tree build_baselink (tree, tree, tree, tree);
extern tree adjust_result_of_qualified_name_lookup
(tree, tree, tree);
extern tree copied_binfo (tree, tree);
extern tree original_binfo (tree, tree);
extern int shared_member_p (tree);
extern bool any_dependent_bases_p (tree = current_nonlambda_class_type ());
/* The representation of a deferred access check.  Instances are
   accumulated in GC-allocated vectors (see get_deferred_access_checks
   below, which returns vec<deferred_access_check, va_gc> *); the
   GTY(()) marker makes the struct visible to the garbage
   collector.  */
struct GTY(()) deferred_access_check {
  /* The base class in which the declaration is referenced. */
  tree binfo;
  /* The declaration whose access must be checked. */
  tree decl;
  /* The declaration that should be used in the error message. */
  tree diag_decl;
  /* The location of this access. */
  location_t loc;
};
/* in semantics.c */
extern void push_deferring_access_checks (deferring_kind);
extern void resume_deferring_access_checks (void);
extern void stop_deferring_access_checks (void);
extern void pop_deferring_access_checks (void);
extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void);
extern void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> *);
extern void pop_to_parent_deferring_access_checks (void);
extern bool perform_access_checks (vec<deferred_access_check, va_gc> *,
tsubst_flags_t);
extern bool perform_deferred_access_checks (tsubst_flags_t);
extern bool perform_or_defer_access_check (tree, tree, tree,
tsubst_flags_t);
/* RAII sentinel that ensures deferred access checks are popped before
   a function returns: the constructor pushes a deferring context
   (push_deferring_access_checks with dk_deferred) and the destructor
   pops it (pop_deferring_access_checks).  */
struct deferring_access_check_sentinel
{
  deferring_access_check_sentinel ()
  {
    push_deferring_access_checks (dk_deferred);
  }
  ~deferring_access_check_sentinel ()
  {
    pop_deferring_access_checks ();
  }

private:
  /* Copying the sentinel would run the destructor twice for a single
     push, popping the deferring context twice and unbalancing the
     access-check stack.  Declared but never defined (C++03
     non-copyable idiom; this header predates "= delete").  */
  deferring_access_check_sentinel (const deferring_access_check_sentinel &);
  deferring_access_check_sentinel &
  operator= (const deferring_access_check_sentinel &);
};
extern int stmts_are_full_exprs_p (void);
extern void init_cp_semantics (void);
extern tree do_poplevel (tree);
extern void break_maybe_infinite_loop (void);
extern void add_decl_expr (tree);
extern tree maybe_cleanup_point_expr_void (tree);
extern tree finish_expr_stmt (tree);
extern tree begin_if_stmt (void);
extern tree finish_if_stmt_cond (tree, tree);
extern tree finish_then_clause (tree);
extern void begin_else_clause (tree);
extern void finish_else_clause (tree);
extern void finish_if_stmt (tree);
extern tree begin_while_stmt (void);
extern void finish_while_stmt_cond (tree, tree, bool);
extern void finish_while_stmt (tree);
extern tree begin_do_stmt (void);
extern void finish_do_body (tree);
extern void finish_do_stmt (tree, tree, bool);
extern tree finish_return_stmt (tree);
extern tree begin_for_scope (tree *);
extern tree begin_for_stmt (tree, tree);
extern void finish_init_stmt (tree);
extern void finish_for_cond (tree, tree, bool);
extern void finish_for_expr (tree, tree);
extern void finish_for_stmt (tree);
extern tree begin_range_for_stmt (tree, tree);
extern void finish_range_for_decl (tree, tree, tree);
extern void finish_range_for_stmt (tree);
extern tree finish_break_stmt (void);
extern tree finish_continue_stmt (void);
extern tree begin_switch_stmt (void);
extern void finish_switch_cond (tree, tree);
extern void finish_switch_stmt (tree);
extern tree finish_goto_stmt (tree);
extern tree begin_try_block (void);
extern void finish_try_block (tree);
extern void finish_handler_sequence (tree);
extern tree begin_function_try_block (tree *);
extern void finish_function_try_block (tree);
extern void finish_function_handler_sequence (tree, tree);
extern void finish_cleanup_try_block (tree);
extern tree begin_handler (void);
extern void finish_handler_parms (tree, tree);
extern void finish_handler (tree);
extern void finish_cleanup (tree, tree);
extern bool is_this_parameter (tree);
/* Flag bits accepted by begin_compound_stmt (declared just below).
   The non-zero values are distinct powers of two, so they may be
   OR'ed together into its "unsigned int" argument.  The exact
   semantics of each flag live in semantics.c; the per-flag notes
   below are inferred from the names -- TODO confirm against the
   implementation of begin_compound_stmt.  */
enum {
  BCS_NORMAL = 0,      /* No special handling requested.  */
  BCS_NO_SCOPE = 1,    /* Presumably: do not open a new binding scope.  */
  BCS_TRY_BLOCK = 2,   /* Presumably: the body of a try-block.  */
  BCS_FN_BODY = 4,     /* Presumably: the outermost block of a function.  */
  BCS_TRANSACTION = 8  /* Presumably: a transaction statement body.  */
};
extern tree begin_compound_stmt (unsigned int);
extern void finish_compound_stmt (tree);
extern tree finish_asm_stmt (int, tree, tree, tree, tree,
tree);
extern tree finish_label_stmt (tree);
extern void finish_label_decl (tree);
extern cp_expr finish_parenthesized_expr (cp_expr);
extern tree force_paren_expr (tree);
extern tree maybe_undo_parenthesized_ref (tree);
extern tree finish_non_static_data_member (tree, tree, tree);
extern tree begin_stmt_expr (void);
extern tree finish_stmt_expr_expr (tree, tree);
extern tree finish_stmt_expr (tree, bool);
extern tree stmt_expr_value_expr (tree);
bool empty_expr_stmt_p (tree);
extern cp_expr perform_koenig_lookup (cp_expr, vec<tree, va_gc> *,
tsubst_flags_t);
extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool,
bool, tsubst_flags_t);
extern tree lookup_and_finish_template_variable (tree, tree, tsubst_flags_t = tf_warning_or_error);
extern tree finish_template_variable (tree, tsubst_flags_t = tf_warning_or_error);
extern cp_expr finish_increment_expr (cp_expr, enum tree_code);
extern tree finish_this_expr (void);
extern tree finish_pseudo_destructor_expr (tree, tree, tree, location_t);
extern cp_expr finish_unary_op_expr (location_t, enum tree_code, cp_expr,
tsubst_flags_t);
extern tree finish_compound_literal (tree, tree, tsubst_flags_t);
extern tree finish_fname (tree);
extern void finish_translation_unit (void);
extern tree finish_template_type_parm (tree, tree);
extern tree finish_template_template_parm (tree, tree);
extern tree begin_class_definition (tree);
extern void finish_template_decl (tree);
extern tree finish_template_type (tree, tree, int);
extern tree finish_base_specifier (tree, tree, bool);
extern void finish_member_declaration (tree);
extern bool outer_automatic_var_p (tree);
extern tree process_outer_var_ref (tree, tsubst_flags_t);
extern cp_expr finish_id_expression (tree, tree, tree,
cp_id_kind *,
bool, bool, bool *,
bool, bool, bool, bool,
const char **,
location_t);
extern tree finish_typeof (tree);
extern tree finish_underlying_type (tree);
extern tree calculate_bases (tree);
extern tree finish_bases (tree, bool);
extern tree calculate_direct_bases (tree);
extern tree finish_offsetof (tree, tree, location_t);
extern void finish_decl_cleanup (tree, tree);
extern void finish_eh_cleanup (tree);
extern void emit_associated_thunks (tree);
extern void finish_mem_initializers (tree);
extern tree check_template_template_default_arg (tree);
extern bool expand_or_defer_fn_1 (tree);
extern void expand_or_defer_fn (tree);
extern void add_typedef_to_current_template_for_access_check (tree, tree,
location_t);
extern void check_accessibility_of_qualified_id (tree, tree, tree);
extern tree finish_qualified_id_expr (tree, tree, bool, bool,
bool, bool, tsubst_flags_t);
extern void simplify_aggr_init_expr (tree *);
extern void finalize_nrv (tree *, tree, tree);
extern tree omp_reduction_id (enum tree_code, tree, tree);
extern tree cp_remove_omp_priv_cleanup_stmt (tree *, int *, void *);
extern void cp_check_omp_declare_reduction (tree);
extern void finish_omp_declare_simd_methods (tree);
extern tree finish_omp_clauses (tree, enum c_omp_region_type);
extern tree push_omp_privatization_clauses (bool);
extern void pop_omp_privatization_clauses (tree);
extern void save_omp_privatization_clauses (vec<tree> &);
extern void restore_omp_privatization_clauses (vec<tree> &);
extern void finish_omp_threadprivate (tree);
extern tree begin_omp_structured_block (void);
extern tree finish_omp_structured_block (tree);
extern tree finish_oacc_data (tree, tree);
extern tree finish_oacc_host_data (tree, tree);
extern tree finish_omp_construct (enum tree_code, tree, tree);
extern tree begin_omp_parallel (void);
extern tree finish_omp_parallel (tree, tree);
extern tree begin_omp_task (void);
extern tree finish_omp_task (tree, tree);
extern tree finish_omp_for (location_t, enum tree_code,
tree, tree, tree, tree, tree,
tree, tree, vec<tree> *, tree);
extern void finish_omp_atomic (enum tree_code, enum tree_code,
tree, tree, tree, tree, tree,
bool);
extern void finish_omp_barrier (void);
extern void finish_omp_flush (void);
extern void finish_omp_taskwait (void);
extern void finish_omp_taskyield (void);
extern void finish_omp_cancel (tree);
extern void finish_omp_cancellation_point (tree);
extern tree omp_privatize_field (tree, bool);
extern tree begin_transaction_stmt (location_t, tree *, int);
extern void finish_transaction_stmt (tree, tree, int, tree);
extern tree build_transaction_expr (location_t, tree, int, tree);
extern bool cxx_omp_create_clause_info (tree, tree, bool, bool,
bool, bool);
extern tree baselink_for_fns (tree);
extern void finish_static_assert (tree, tree, location_t,
bool);
extern tree finish_decltype_type (tree, bool, tsubst_flags_t);
extern tree finish_trait_expr (enum cp_trait_kind, tree, tree);
extern tree build_lambda_expr (void);
extern tree build_lambda_object (tree);
extern tree begin_lambda_type (tree);
extern tree lambda_capture_field_type (tree, bool, bool);
extern tree lambda_return_type (tree);
extern tree lambda_proxy_type (tree);
extern tree lambda_function (tree);
extern void apply_deduced_return_type (tree, tree);
extern tree add_capture (tree, tree, tree, bool, bool);
extern tree add_default_capture (tree, tree, tree);
extern tree build_capture_proxy (tree);
extern void insert_capture_proxy (tree);
extern void insert_pending_capture_proxies (void);
extern bool is_capture_proxy (tree);
extern bool is_normal_capture_proxy (tree);
extern void register_capture_members (tree);
extern tree lambda_expr_this_capture (tree, bool);
extern void maybe_generic_this_capture (tree, tree);
extern tree maybe_resolve_dummy (tree, bool);
extern tree current_nonlambda_function (void);
extern tree nonlambda_method_basetype (void);
extern tree current_nonlambda_scope (void);
extern bool generic_lambda_fn_p (tree);
extern void maybe_add_lambda_conv_op (tree);
extern bool is_lambda_ignored_entity (tree);
extern bool lambda_static_thunk_p (tree);
extern tree finish_builtin_launder (location_t, tree,
tsubst_flags_t);
/* in tree.c */
extern int cp_tree_operand_length (const_tree);
extern int cp_tree_code_length (enum tree_code);
void cp_free_lang_data (tree t);
extern tree force_target_expr (tree, tree, tsubst_flags_t);
extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t);
extern void lang_check_failed (const char *, int,
const char *) ATTRIBUTE_NORETURN;
extern tree stabilize_expr (tree, tree *);
extern void stabilize_call (tree, tree *);
extern bool stabilize_init (tree, tree *);
extern tree add_stmt_to_compound (tree, tree);
extern void init_tree (void);
extern bool pod_type_p (const_tree);
extern bool layout_pod_type_p (const_tree);
extern bool std_layout_type_p (const_tree);
extern bool trivial_type_p (const_tree);
extern bool trivially_copyable_p (const_tree);
extern bool type_has_unique_obj_representations (const_tree);
extern bool scalarish_type_p (const_tree);
extern bool type_has_nontrivial_default_init (const_tree);
extern bool type_has_nontrivial_copy_init (const_tree);
extern bool class_tmpl_impl_spec_p (const_tree);
extern int zero_init_p (const_tree);
extern bool check_abi_tag_redeclaration (const_tree, const_tree, const_tree);
extern bool check_abi_tag_args (tree, tree);
extern tree strip_typedefs (tree, bool * = NULL);
extern tree strip_typedefs_expr (tree, bool * = NULL);
extern tree copy_binfo (tree, tree, tree,
tree *, int);
extern int member_p (const_tree);
extern cp_lvalue_kind real_lvalue_p (const_tree);
extern cp_lvalue_kind lvalue_kind (const_tree);
extern bool glvalue_p (const_tree);
extern bool obvalue_p (const_tree);
extern bool xvalue_p (const_tree);
extern bool bitfield_p (const_tree);
extern tree cp_stabilize_reference (tree);
extern bool builtin_valid_in_constant_expr_p (const_tree);
extern tree build_min (enum tree_code, tree, ...);
extern tree build_min_nt_loc (location_t, enum tree_code,
...);
extern tree build_min_non_dep (enum tree_code, tree, ...);
extern tree build_min_non_dep_op_overload (enum tree_code, tree, tree, ...);
extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *);
extern vec<tree, va_gc>* vec_copy_and_insert (vec<tree, va_gc>*, tree, unsigned);
extern tree build_cplus_new (tree, tree, tsubst_flags_t);
extern tree build_aggr_init_expr (tree, tree);
extern tree get_target_expr (tree);
extern tree get_target_expr_sfinae (tree, tsubst_flags_t);
extern tree build_cplus_array_type (tree, tree);
extern tree build_array_of_n_type (tree, int);
extern bool array_of_runtime_bound_p (tree);
extern tree build_array_copy (tree);
extern tree build_vec_init_expr (tree, tree, tsubst_flags_t);
extern void diagnose_non_constexpr_vec_init (tree);
extern tree hash_tree_cons (tree, tree, tree);
extern tree hash_tree_chain (tree, tree);
extern tree build_qualified_name (tree, tree, tree, bool);
extern tree build_ref_qualified_type (tree, cp_ref_qualifier);
extern int is_overloaded_fn (tree);
extern tree dependent_name (tree);
extern tree get_fns (tree);
extern tree get_first_fn (tree);
extern tree ovl_cons (tree, tree);
extern tree build_overload (tree, tree);
extern tree ovl_scope (tree);
extern const char *cxx_printable_name (tree, int);
extern const char *cxx_printable_name_translate (tree, int);
extern tree canonical_eh_spec (tree);
extern tree build_exception_variant (tree, tree);
extern tree bind_template_template_parm (tree, tree);
extern tree array_type_nelts_total (tree);
extern tree array_type_nelts_top (tree);
extern tree break_out_target_exprs (tree);
extern tree build_ctor_subob_ref (tree, tree, tree);
extern tree replace_placeholders (tree, tree, bool * = NULL);
extern tree get_type_decl (tree);
extern tree decl_namespace_context (tree);
extern bool decl_anon_ns_mem_p (const_tree);
extern tree lvalue_type (tree);
extern tree error_type (tree);
extern int varargs_function_p (const_tree);
extern bool really_overloaded_fn (tree);
extern bool cp_tree_equal (tree, tree);
extern tree no_linkage_check (tree, bool);
extern void debug_binfo (tree);
extern tree build_dummy_object (tree);
extern tree maybe_dummy_object (tree, tree *);
extern int is_dummy_object (const_tree);
extern const struct attribute_spec cxx_attribute_table[];
extern tree make_ptrmem_cst (tree, tree);
extern tree cp_build_type_attribute_variant (tree, tree);
extern tree cp_build_reference_type (tree, bool);
extern tree move (tree);
extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t);
/* Convenience wrapper: invoke cp_build_qualified_type_real (declared
   just above) with the default diagnostic setting,
   tf_warning_or_error.  */
#define cp_build_qualified_type(TYPE, QUALS) \
  cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error)
extern bool cv_qualified_p (const_tree);
extern tree cv_unqualified (tree);
extern special_function_kind special_function_p (const_tree);
extern int count_trees (tree);
extern int char_type_p (tree);
extern void verify_stmt_tree (tree);
extern linkage_kind decl_linkage (tree);
extern duration_kind decl_storage_duration (tree);
extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn,
void*, hash_set<tree> *);
/* C++-aware tree walks: dispatch to the generic walk_tree_1 /
   walk_tree_without_duplicates_1 drivers, passing cp_walk_subtrees
   (declared just above) as the language hook that descends into
   C++-specific tree codes.  */
#define cp_walk_tree(tp,func,data,pset) \
	walk_tree_1 (tp, func, data, pset, cp_walk_subtrees)
#define cp_walk_tree_without_duplicates(tp,func,data) \
	walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees)
extern tree rvalue (tree);
extern tree convert_bitfield_to_declared_type (tree);
extern tree cp_save_expr (tree);
extern bool cast_valid_in_integral_constant_expression_p (tree);
extern bool cxx_type_hash_eq (const_tree, const_tree);
extern tree cxx_copy_lang_qualifiers (const_tree, const_tree);
extern void cxx_print_statistics (void);
extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t);
/* in ptree.c */
extern void cxx_print_xnode (FILE *, tree, int);
extern void cxx_print_decl (FILE *, tree, int);
extern void cxx_print_type (FILE *, tree, int);
extern void cxx_print_identifier (FILE *, tree, int);
extern void cxx_print_error_function (diagnostic_context *,
const char *,
struct diagnostic_info *);
/* in typeck.c */
extern bool cxx_mark_addressable (tree, bool = false);
extern int string_conv_p (const_tree, const_tree, int);
extern tree cp_truthvalue_conversion (tree);
extern tree condition_conversion (tree);
extern tree require_complete_type (tree);
extern tree require_complete_type_sfinae (tree, tsubst_flags_t);
extern tree complete_type (tree);
extern tree complete_type_or_else (tree, tree);
extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t);
extern int type_unknown_p (const_tree);
/* Comparison levels for comp_except_specs (declared just below); the
   implementation lives in typeck.c.  The names suggest progressively
   stricter matching of exception specifications -- TODO confirm the
   exact semantics there.  */
enum { ce_derived, ce_type, ce_normal, ce_exact };
extern bool comp_except_specs (const_tree, const_tree, int);
extern bool comptypes (tree, tree, int);
extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree);
extern bool compparms (const_tree, const_tree);
extern int comp_cv_qualification (const_tree, const_tree);
extern int comp_cv_qualification (int, int);
extern int comp_cv_qual_signature (tree, tree);
extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code, bool);
extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool);
extern tree cxx_alignas_expr (tree);
extern tree cxx_sizeof_nowarn (tree);
extern tree is_bitfield_expr_with_lowered_type (const_tree);
extern tree unlowered_expr_type (const_tree);
extern tree decay_conversion (tree,
tsubst_flags_t,
bool = true);
extern tree build_class_member_access_expr (cp_expr, tree, tree, bool,
tsubst_flags_t);
extern tree finish_class_member_access_expr (cp_expr, tree, bool,
tsubst_flags_t);
extern tree build_x_indirect_ref (location_t, tree,
ref_operator, tsubst_flags_t);
extern tree cp_build_indirect_ref (tree, ref_operator,
tsubst_flags_t);
extern tree build_array_ref (location_t, tree, tree);
extern tree cp_build_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t);
extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...)
ATTRIBUTE_SENTINEL;
extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_x_binary_op (location_t,
enum tree_code, tree,
enum tree_code, tree,
enum tree_code, tree *,
tsubst_flags_t);
extern tree build_x_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree build_x_unary_op (location_t,
enum tree_code, cp_expr,
tsubst_flags_t);
extern tree cp_build_addressof (location_t, tree,
tsubst_flags_t);
extern tree cp_build_addr_expr (tree, tsubst_flags_t);
extern tree cp_build_unary_op (enum tree_code, tree, bool,
tsubst_flags_t);
extern tree unary_complex_lvalue (enum tree_code, tree);
extern tree build_x_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_x_compound_expr_from_list (tree, expr_list_kind,
tsubst_flags_t);
extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *,
const char *, tsubst_flags_t);
extern tree build_x_compound_expr (location_t, tree, tree,
tsubst_flags_t);
extern tree build_compound_expr (location_t, tree, tree);
extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t);
extern tree build_static_cast (tree, tree, tsubst_flags_t);
extern tree build_reinterpret_cast (tree, tree, tsubst_flags_t);
extern tree build_const_cast (tree, tree, tsubst_flags_t);
extern tree build_c_cast (location_t, tree, tree);
extern cp_expr build_c_cast (location_t loc, tree type,
cp_expr expr);
extern tree cp_build_c_cast (tree, tree, tsubst_flags_t);
extern cp_expr build_x_modify_expr (location_t, tree,
enum tree_code, tree,
tsubst_flags_t);
extern tree cp_build_modify_expr (location_t, tree,
enum tree_code, tree,
tsubst_flags_t);
extern tree convert_for_initialization (tree, tree, tree, int,
impl_conv_rhs, tree, int,
tsubst_flags_t);
extern int comp_ptr_ttypes (tree, tree);
extern bool comp_ptr_ttypes_const (tree, tree);
extern bool error_type_p (const_tree);
extern bool ptr_reasonably_similar (const_tree, const_tree);
extern tree build_ptrmemfunc (tree, tree, int, bool,
tsubst_flags_t);
extern int cp_type_quals (const_tree);
extern int type_memfn_quals (const_tree);
extern cp_ref_qualifier type_memfn_rqual (const_tree);
extern tree apply_memfn_quals (tree, cp_cv_quals, cp_ref_qualifier);
extern bool cp_has_mutable_p (const_tree);
extern bool at_least_as_qualified_p (const_tree, const_tree);
extern void cp_apply_type_quals_to_decl (int, tree);
extern tree build_ptrmemfunc1 (tree, tree, tree);
extern void expand_ptrmemfunc_cst (tree, tree *, tree *);
extern tree type_after_usual_arithmetic_conversions (tree, tree);
extern tree common_pointer_type (tree, tree);
extern tree composite_pointer_type (tree, tree, tree, tree,
composite_pointer_operation,
tsubst_flags_t);
extern tree merge_types (tree, tree);
extern tree strip_array_domain (tree);
extern tree check_return_expr (tree, bool *);
extern tree cp_build_binary_op (location_t,
enum tree_code, tree, tree,
tsubst_flags_t);
extern tree build_x_vec_perm_expr (location_t,
tree, tree, tree,
tsubst_flags_t);
#define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, true)
extern tree build_simple_component_ref (tree, tree);
extern tree build_ptrmemfunc_access_expr (tree, tree);
extern tree build_address (tree);
extern tree build_nop (tree, tree);
extern tree non_reference (tree);
extern tree lookup_anon_field (tree, tree);
extern bool invalid_nonstatic_memfn_p (location_t, tree,
tsubst_flags_t);
extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t);
extern tree convert_ptrmem (tree, tree, bool, bool,
tsubst_flags_t);
extern int lvalue_or_else (tree, enum lvalue_use,
tsubst_flags_t);
extern void check_template_keyword (tree);
extern bool check_raw_literal_operator (const_tree decl);
extern bool check_literal_operator_args (const_tree, bool *, bool *);
extern void maybe_warn_about_useless_cast (tree, tree, tsubst_flags_t);
extern tree cp_perform_integral_promotions (tree, tsubst_flags_t);
extern tree finish_left_unary_fold_expr (tree, int);
extern tree finish_right_unary_fold_expr (tree, int);
extern tree finish_binary_fold_expr (tree, tree, int);
/* in typeck2.c */
extern void require_complete_eh_spec_types (tree, tree);
extern void cxx_incomplete_type_diagnostic (location_t, const_tree,
const_tree, diagnostic_t);
/* Convenience overload without an explicit location: report the
   incomplete-type diagnostic at VALUE's expression location, falling
   back to input_location when VALUE carries no location.  */
inline void
cxx_incomplete_type_diagnostic (const_tree value, const_tree type,
                                diagnostic_t diag_kind)
{
  cxx_incomplete_type_diagnostic (EXPR_LOC_OR_LOC (value, input_location),
                                  value, type, diag_kind);
}
extern void cxx_incomplete_type_error (location_t, const_tree,
const_tree);
/* Convenience wrapper: emit the incomplete-type diagnostic for
   VALUE/TYPE at error severity (DK_ERROR).  */
inline void
cxx_incomplete_type_error (const_tree value, const_tree type)
{
  cxx_incomplete_type_diagnostic (value, type, DK_ERROR);
}
extern void cxx_incomplete_type_inform (const_tree);
extern tree error_not_base_type (tree, tree);
extern tree binfo_or_else (tree, tree);
extern void cxx_readonly_error (tree, enum lvalue_use);
extern void complete_type_check_abstract (tree);
extern int abstract_virtuals_error (tree, tree);
extern int abstract_virtuals_error (abstract_class_use, tree);
extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t);
extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t);
extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int);
extern tree split_nonconstant_init (tree, tree);
extern bool check_narrowing (tree, tree, tsubst_flags_t);
extern tree digest_init (tree, tree, tsubst_flags_t);
extern tree digest_init_flags (tree, tree, int, tsubst_flags_t);
extern tree digest_nsdmi_init (tree, tree);
extern tree build_scoped_ref (tree, tree, tree *);
extern tree build_x_arrow (location_t, tree,
tsubst_flags_t);
extern tree build_m_component_ref (tree, tree, tsubst_flags_t);
extern tree build_functional_cast (tree, tree, tsubst_flags_t);
extern tree add_exception_specifier (tree, tree, int);
extern tree merge_exception_specifiers (tree, tree);
/* in mangle.c */
extern bool maybe_remove_implicit_alias (tree);
extern void init_mangle (void);
extern void mangle_decl (tree);
extern const char *mangle_type_string (tree);
extern tree mangle_typeinfo_for_type (tree);
extern tree mangle_typeinfo_string_for_type (tree);
extern tree mangle_vtbl_for_type (tree);
extern tree mangle_vtt_for_type (tree);
extern tree mangle_ctor_vtbl_for_type (tree, tree);
extern tree mangle_thunk (tree, int, tree, tree, tree);
extern tree mangle_conv_op_name_for_type (tree);
extern tree mangle_guard_variable (tree);
extern tree mangle_tls_init_fn (tree);
extern tree mangle_tls_wrapper_fn (tree);
extern bool decl_tls_wrapper_p (tree);
extern tree mangle_ref_init_variable (tree);
extern char * get_mangled_vtable_map_var_name (tree);
extern bool mangle_return_type_p (tree);
extern tree mangle_decomp (tree, vec<tree> &);
/* in dump.c */
extern bool cp_dump_tree (void *, tree);
/* In cp/cp-objcp-common.c. */
extern alias_set_type cxx_get_alias_set (tree);
extern bool cxx_warn_unused_global_decl (const_tree);
extern size_t cp_tree_size (enum tree_code);
extern bool cp_var_mod_type_p (tree, tree);
extern void cxx_initialize_diagnostics (diagnostic_context *);
extern int cxx_types_compatible_p (tree, tree);
extern void init_shadowed_var_for_decl (void);
extern bool cxx_block_may_fallthru (const_tree);
/* in cp-gimplify.c */
extern int cp_gimplify_expr (tree *, gimple_seq *,
gimple_seq *);
extern void cp_genericize (tree);
extern bool cxx_omp_const_qual_no_mutable (tree);
extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree);
extern tree cxx_omp_clause_default_ctor (tree, tree, tree);
extern tree cxx_omp_clause_copy_ctor (tree, tree, tree);
extern tree cxx_omp_clause_assign_op (tree, tree, tree);
extern tree cxx_omp_clause_dtor (tree, tree);
extern void cxx_omp_finish_clause (tree, gimple_seq *);
extern bool cxx_omp_privatize_by_reference (const_tree);
extern bool cxx_omp_disregard_value_expr (tree, bool);
extern void cp_fold_function (tree);
extern tree cp_fully_fold (tree);
extern void clear_fold_cache (void);
/* in name-lookup.c */
extern void suggest_alternatives_for (location_t, tree, bool);
extern bool suggest_alternative_in_explicit_scope (location_t, tree, tree);
extern tree strip_using_decl (tree);
/* Tell the binding oracle what kind of binding we are looking for. */
enum cp_oracle_request
{
CP_ORACLE_IDENTIFIER
};
/* If this is non-NULL, then it is a "binding oracle" which can lazily
create bindings when needed by the C compiler. The oracle is told
the name and type of the binding to create. It can call pushdecl
or the like to ensure the binding is visible; or do nothing,
leaving the binding untouched. c-decl.c takes note of when the
oracle has been called and will not call it again if it fails to
create a given binding. */
typedef void cp_binding_oracle_function (enum cp_oracle_request, tree identifier);
extern cp_binding_oracle_function *cp_binding_oracle;
/* in constraint.cc */
extern void init_constraint_processing ();
extern bool constraint_p (tree);
extern tree conjoin_constraints (tree, tree);
extern tree conjoin_constraints (tree);
extern tree get_constraints (tree);
extern void set_constraints (tree, tree);
extern void remove_constraints (tree);
extern tree current_template_constraints (void);
extern tree associate_classtype_constraints (tree);
extern tree build_constraints (tree, tree);
extern tree get_shorthand_constraints (tree);
extern tree build_concept_check (tree, tree, tree = NULL_TREE);
extern tree build_constrained_parameter (tree, tree, tree = NULL_TREE);
extern tree make_constrained_auto (tree, tree);
extern void placeholder_extract_concept_and_args (tree, tree&, tree&);
extern bool equivalent_placeholder_constraints (tree, tree);
extern hashval_t hash_placeholder_constraint (tree);
extern bool deduce_constrained_parameter (tree, tree&, tree&);
extern tree resolve_constraint_check (tree);
extern tree check_function_concept (tree);
extern tree finish_template_introduction (tree, tree);
extern bool valid_requirements_p (tree);
extern tree finish_concept_name (tree);
extern tree finish_shorthand_constraint (tree, tree);
extern tree finish_requires_expr (tree, tree);
extern tree finish_simple_requirement (tree);
extern tree finish_type_requirement (tree);
extern tree finish_compound_requirement (tree, tree, bool);
extern tree finish_nested_requirement (tree);
extern void check_constrained_friend (tree, tree);
extern tree tsubst_requires_expr (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_constraint (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_constraint_info (tree, tree, tsubst_flags_t, tree);
extern bool function_concept_check_p (tree);
extern tree normalize_expression (tree);
extern tree expand_concept (tree, tree);
extern bool expanding_concept ();
extern tree evaluate_constraints (tree, tree);
extern tree evaluate_function_concept (tree, tree);
extern tree evaluate_variable_concept (tree, tree);
extern tree evaluate_constraint_expression (tree, tree);
extern bool constraints_satisfied_p (tree);
extern bool constraints_satisfied_p (tree, tree);
extern tree lookup_constraint_satisfaction (tree, tree);
extern tree memoize_constraint_satisfaction (tree, tree, tree);
extern tree lookup_concept_satisfaction (tree, tree);
extern tree memoize_concept_satisfaction (tree, tree, tree);
extern tree get_concept_expansion (tree, tree);
extern tree save_concept_expansion (tree, tree, tree);
extern bool* lookup_subsumption_result (tree, tree);
extern bool save_subsumption_result (tree, tree, bool);
extern bool equivalent_constraints (tree, tree);
extern bool equivalently_constrained (tree, tree);
extern bool subsumes_constraints (tree, tree);
extern bool strictly_subsumes (tree, tree);
extern int more_constrained (tree, tree);
extern void diagnose_constraints (location_t, tree, tree);
/* in logic.cc */
extern tree decompose_conclusions (tree);
extern bool subsumes (tree, tree);
/* In class.c */
extern void cp_finish_injected_record_type (tree);
/* in vtable-class-hierarchy.c */
extern void vtv_compute_class_hierarchy_transitive_closure (void);
extern void vtv_generate_init_routine (void);
extern void vtv_save_class_info (tree);
extern void vtv_recover_class_info (void);
extern void vtv_build_vtable_verify_fndecl (void);
/* In cp/cp-array-notations.c */
extern tree expand_array_notation_exprs (tree);
bool cilkplus_an_triplet_types_ok_p (location_t, tree, tree, tree,
tree);
/* In constexpr.c */
extern void fini_constexpr (void);
extern bool literal_type_p (tree);
extern tree register_constexpr_fundef (tree, tree);
extern bool is_valid_constexpr_fn (tree, bool);
extern bool check_constexpr_ctor_body (tree, tree, bool);
extern tree ensure_literal_type_for_constexpr_object (tree);
extern bool potential_constant_expression (tree);
extern bool potential_nondependent_constant_expression (tree);
extern bool potential_nondependent_static_init_expression (tree);
extern bool potential_static_init_expression (tree);
extern bool potential_rvalue_constant_expression (tree);
extern bool require_potential_constant_expression (tree);
extern bool require_potential_rvalue_constant_expression (tree);
extern tree cxx_constant_value (tree, tree = NULL_TREE);
extern tree maybe_constant_value (tree, tree = NULL_TREE);
extern tree maybe_constant_init (tree, tree = NULL_TREE);
extern tree fold_non_dependent_expr (tree);
extern tree fold_simple (tree);
extern bool is_sub_constant_expr (tree);
extern bool reduced_constant_expression_p (tree);
extern bool is_instantiation_of_constexpr (tree);
extern bool var_in_constexpr_fn (tree);
extern bool var_in_maybe_constexpr_fn (tree);
extern void explain_invalid_constexpr_fn (tree);
extern vec<tree> cx_error_context (void);
extern tree fold_sizeof_expr (tree);
extern void clear_cv_and_fold_caches (void);
/* In c-family/cilk.c */
extern bool cilk_valid_spawn (tree);
/* In cp-ubsan.c */
extern void cp_ubsan_maybe_instrument_member_call (tree);
extern void cp_ubsan_instrument_member_accesses (tree *);
extern tree cp_ubsan_maybe_instrument_downcast (location_t, tree, tree, tree);
extern tree cp_ubsan_maybe_instrument_cast_to_vbase (location_t, tree, tree);
extern void cp_ubsan_maybe_initialize_vtbl_ptrs (tree);
/* -- end of C++ */
#endif /* ! GCC_CP_TREE_H */
|
omp_array.c |
/******************************************************************************
* * FILE: omp_array.c
* * DESCRIPTION:
* * Array addition - C/C++ Version
 * * This is a simple array addition running with OpenMP
* * AUTHOR: Victor Rodriguez
* * LAST REVISED: 04/06/05
* ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/* Element-wise array addition benchmark: myarray_1 = myarray_2 + myarray_3,
 * parallelized with an OpenMP worksharing loop.
 *
 * Fixes vs. the original:
 *  - "#pragma omp sections" around a single for-loop is invalid outside a
 *    parallel region and runs the whole loop serially anyway; a
 *    "parallel for" actually distributes the iterations.
 *  - the inputs were summed without ever being initialized (reads of
 *    indeterminate memory); they are now given defined values.
 *  - malloc results are checked before use.
 *  - main now returns a value on every path.
 */
int main (int argc, char *argv[])
{
    long int i;
    /* Elements per array.  NOTE(review): the name sz2G suggests ~2G
     * entries but the value is 2e7 (~160 MB per array); kept as-is to
     * preserve the original memory footprint. */
    long int sz2G = 20000000;

    long int *myarray_1 = (long int *) malloc(sizeof(long int) * sz2G);
    long int *myarray_2 = (long int *) malloc(sizeof(long int) * sz2G);
    long int *myarray_3 = (long int *) malloc(sizeof(long int) * sz2G);
    if (myarray_1 == NULL || myarray_2 == NULL || myarray_3 == NULL) {
        fprintf(stderr, "out of memory\n");
        free(myarray_1);
        free(myarray_2);
        free(myarray_3);
        return 1;
    }

    /* Give the inputs defined values. */
    for (i = 0; i < sz2G; i++) {
        myarray_2[i] = i;
        myarray_3[i] = sz2G - i;
    }

#pragma omp parallel for
    for (i = 0; i < sz2G; i++) {
        myarray_1[i] = myarray_2[i] + myarray_3[i];
    }

    free(myarray_1);
    free(myarray_2);
    free(myarray_3);
    return 0;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = *X - *Y for two `struct timeval` values, per the
 * classic GNU libc recipe.  Returns 1 when the difference is negative,
 * 0 otherwise.
 *
 * NOTE: *Y is deliberately normalized in place (the carry/borrow is
 * folded into it) before the subtraction — callers must not rely on
 * *Y being unchanged. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into *Y when X's microseconds are smaller,
     * so the usec subtraction below cannot go negative. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec  += carry;
    }
    /* Carry excess microseconds out of *Y when the gap exceeds one
     * second, keeping tv_usec in range. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec  -= carry;
    }

    /* After normalization the component-wise difference is valid and
     * tv_usec is certainly positive. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/* Driver for the time-tiled (PLUTO/CLooG-generated) order-1 3D 7-point
 * variable-coefficient stencil.  Usage: prog Nx Ny Nz Nt.
 * NOTE(review): Nx/Ny/Nz are only assigned when argc > 3 and Nt only
 * when argc > 4 — with fewer arguments they are used uninitialized;
 * presumably the benchmark is always invoked with all four sizes
 * (confirm with the harness). */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    /* +2 adds one halo/boundary layer on each side of every dimension */
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays
  /* A[2][Nz][Ny][Nx]: double-buffered grid — time steps alternate
   * between A[t%2] (read) and A[(t+1)%2] (write). */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* coef[7][Nz][Ny][Nx]: one coefficient field per stencil point
   * (center plus the six face neighbours). */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  /* Tile extents consumed by the generated tiled loops; -1 terminates
   * the list. */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 16;
  tile_size[3] = 256;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables
  //
  /* Fixed seed so repeated runs use identical input data. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  /* LIKWID markers bracket the timed region for hardware-counter
   * measurement; every thread must register before the region. */
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  /* Run the kernel TESTS times and keep the best (minimum) wall time. */
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation. It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header. GCC knows the name of this
       header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex. If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */
    /* We do not support C11 <threads.h>. */
    /* Machine-generated tiled loop nest below (CLooG): t1..t4 iterate
     * over time/space tiles, t5 is the time step, t6..t8 the point
     * coordinates (shifted by t5 for time skewing).  Do not hand-edit
     * the bounds — they encode the tiling legality constraints. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,4);t1++) {
        lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
        ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
        /* tiles along t2 are independent for a fixed t1 wavefront */
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-3,4)),ceild(8*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(4*t1+Ny+5,16)),floord(8*t2+Ny+4,16)),floord(8*t1-8*t2+Nz+Ny+3,16));t3++) {
            for (t4=max(max(max(0,ceild(t1-63,64)),ceild(8*t2-Nz-252,256)),ceild(16*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(4*t1+Nx+5,256)),floord(8*t2+Nx+4,256)),floord(16*t3+Nx+12,256)),floord(8*t1-8*t2+Nz+Nx+3,256));t4++) {
              for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),16*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),16*t3+14),256*t4+254),8*t1-8*t2+Nz+5);t5++) {
                for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
                    lbv=max(256*t4,t5+1);
                    ubv=min(256*t4+255,t5+Nx-2);
                    /* innermost (unit-stride) loop: hint vectorization */
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
counters2parallel.c | //counters2 parallel version HPC Felix Feliu
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <memory.h>
#include <malloc.h>
#include <papi.h>
#include <omp.h>
#define SIZE 1000
/* Matrix-multiply workload measured with PAPI hardware counters.
 *
 * Fixes vs. the original:
 *  - events[] / values[] were declared with size 2 but initialized and
 *    read with six entries (out-of-bounds initializer and reads); all
 *    sizes now agree via num_events.
 *  - the matrices (3 x ~4 MB) lived on the stack, overflowing typical
 *    8 MB stack limits; they are now static.
 *  - "shared(SIZE)" named a macro, which is not a variable and is
 *    invalid in a data-sharing clause; removed.
 *  - the parallel loop ran over k, the reduction dimension, so every
 *    thread raced on mresult[i][j]; it now runs over i, giving each
 *    thread disjoint result rows.
 *  - initialization uses proper 2-D indexing instead of the flat
 *    mresult[0][i] walk that indexed past row 0 (same rand() order,
 *    so the input data is unchanged).
 *  - "strore" typo fixed in the output label.
 */
int main(int argc, char** argv) {
    static float matrixa[SIZE][SIZE], matrixb[SIZE][SIZE], mresult[SIZE][SIZE];
    int i, j, k;
    /* Hardware events to sample. */
    int events[6] = { PAPI_TOT_INS, PAPI_TOT_IIS, PAPI_LD_INS, PAPI_FP_OPS, PAPI_FP_INS, PAPI_SR_INS };
    int ret;
    long long values[6];
    const int num_events = 6;

    if (PAPI_num_counters() < num_events) {
        fprintf(stderr, "No hardware counters here, or PAPI not supported.\n");
        exit(1);
    }
    if ((ret = PAPI_start_counters(events, num_events)) != PAPI_OK) {
        fprintf(stderr, "PAPI failed to start counters: %s\n", PAPI_strerror(ret));
        exit(1);
    }

    /* Initialize the Matrix arrays */
    for (i = 0; i < SIZE; i++)
        for (j = 0; j < SIZE; j++) {
            mresult[i][j] = 0.0;
            matrixa[i][j] = matrixb[i][j] = rand() * (float)1.1;
        }

    /* Matrix-Matrix multiply: parallel over result rows i, so each
     * thread writes a disjoint slice of mresult. */
#pragma omp parallel for schedule(static,4) private(j, k)
    for (i = 0; i < SIZE; i++)
        for (j = 0; j < SIZE; j++)
            for (k = 0; k < SIZE; k++)
                mresult[i][j] = mresult[i][j] + matrixa[i][k] * matrixb[k][j];

    if ((ret = PAPI_read_counters(values, num_events)) != PAPI_OK) {
        fprintf(stderr, "PAPI failed to read counters: %s\n", PAPI_strerror(ret));
        exit(1);
    }

    /* Print each sampled counter (indexes mirror events[] order). */
    printf("store instructions = %lld\n", values[5]);
    printf("floating point instructions = %lld\n", values[4]);
    printf("floating point operations = %lld\n", values[3]);
    printf("load instructions = %lld\n", values[2]);
    printf("instructions issued = %lld\n", values[1]);
    printf("instructions completed = %lld\n", values[0]);
    exit(0);
}
|
5643.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"
/* Array initialization. */
/* Fill the m x n DATA matrix with the deterministic pattern i*j/M and
 * set *FLOAT_N to the fixed normalization constant used by the kernel.
 * NOTE(review): the divisor is the compile-time extent M, not the
 * runtime m — standard PolyBench convention. */
static
void init_array (int m,
                 int n,
                 DATA_TYPE *float_n,
                 DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
  int i, j;
  *float_n = 1.2;
  for (i = 0; i < m; i++)
    for (j = 0; j < n; j++)
      data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Dumps the m x m SYMMAT matrix to stderr, breaking the line every
   20 values. */
static
void print_array(int m,
                 DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
  int i, j;
  for (i = 0; i < m; i++)
    for (j = 0; j < m; j++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
      if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Computes the m x m correlation matrix SYMMAT of the n x m column
   vectors in DATA (means/stddevs are side products in MEAN/STDDEV;
   DATA is centered and reduced in place).

   Fixes vs. the original:
    - "num_threads(#P11)" was a leftover template placeholder and not
      valid OpenMP syntax.
    - the four loop nests were wrapped in one bare "#pragma omp
      parallel" region with no worksharing, so every thread executed
      every loop, racing on mean/stddev/data/symmat.
    - a bare "#pragma omp" before the inner centering loop was invalid.
   Each nest is now an independent worksharing "parallel for". */
static
void kernel_correlation(int m, int n,
                        DATA_TYPE float_n,
                        DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
                        DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
                        DATA_TYPE POLYBENCH_1D(mean,M,m),
                        DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
  int i, j, j1, j2;
  DATA_TYPE eps = 0.1f;

#define sqrt_of_array_cell(x,j) sqrt(x[j])

#pragma scop
  /* Determine mean of column vectors of input data matrix.
     Columns are independent, so iterate j in parallel. */
#pragma omp parallel for private(i)
  for (j = 0; j < _PB_M; j++)
    {
      mean[j] = 0.0;
      for (i = 0; i < _PB_N; i++)
        mean[j] += data[i][j];
      mean[j] /= float_n;
    }

  /* Determine standard deviations of column vectors of data matrix. */
#pragma omp parallel for private(i)
  for (j = 0; j < _PB_M; j++)
    {
      stddev[j] = 0.0;
      for (i = 0; i < _PB_N; i++)
        stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
      stddev[j] /= float_n;
      stddev[j] = sqrt_of_array_cell(stddev, j);
      /* The following in an inelegant but usual way to handle
         near-zero std. dev. values, which below would cause a zero-
         divide. */
      stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
    }

  /* Center and reduce the column vectors (rows are independent). */
#pragma omp parallel for private(j)
  for (i = 0; i < _PB_N; i++)
    for (j = 0; j < _PB_M; j++)
      {
        data[i][j] -= mean[j];
        data[i][j] /= sqrt(float_n) * stddev[j];
      }

  /* Calculate the m * m correlation matrix.  For a fixed j1 only cells
     (j1, j2>j1) and their mirrors (j2, j1) are written, so distinct j1
     iterations touch disjoint cells. */
#pragma omp parallel for private(j2, i)
  for (j1 = 0; j1 < _PB_M-1; j1++)
    {
      symmat[j1][j1] = 1.0;
      for (j2 = j1+1; j2 < _PB_M; j2++)
        {
          symmat[j1][j2] = 0.0;
          for (i = 0; i < _PB_N; i++)
            symmat[j1][j2] += (data[i][j1] * data[i][j2]);
          symmat[j2][j1] = symmat[j1][j2];
        }
    }
#pragma endscop
  symmat[_PB_M-1][_PB_M-1] = 1.0;
}
/* Benchmark driver: allocate and initialize the arrays, time the
 * correlation kernel, and print the live-out matrix so dead-code
 * elimination cannot remove the computation. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int n = N;
  int m = M;
  /* Variable declaration/allocation. */
  DATA_TYPE float_n;
  POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
  POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
  POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
  POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);
  /* Initialize array(s). */
  init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel. */
  kernel_correlation (m, n, float_n,
                      POLYBENCH_ARRAY(data),
                      POLYBENCH_ARRAY(symmat),
                      POLYBENCH_ARRAY(mean),
                      POLYBENCH_ARRAY(stddev));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(data);
  POLYBENCH_FREE_ARRAY(symmat);
  POLYBENCH_FREE_ARRAY(mean);
  POLYBENCH_FREE_ARRAY(stddev);
  return 0;
}
|
parallel_section_firstprivate.c | /* This file contains all checks for the section construct without the checks for the reduction clauses:
ordered: checks that the execution is equivalent to the serial case
*/
// Skip testing on 64 bit systems for now!
#ifndef __LP64__
#include <stdio.h>
#include "omp_testsuite.h"
/* Verify the firstprivate clause on "parallel sections": each of the
 * three sections adds its private copy of part (initialized to 11 by
 * firstprivate) to the shared total under a critical section.  With a
 * conforming implementation the result is 7 + 3 * 11, and the function
 * returns nonzero iff that value is observed. */
int
check_parallel_section_firstprivate (FILE * logFile)
{
  int total = 7;
  int part = 11;
  int expected;
#pragma omp parallel sections firstprivate(part)
  {
#pragma omp section
    {
#pragma omp critical
      {
        total = total + part;
      }
    }
#pragma omp section
    {
#pragma omp critical
      {
        total = total + part;
      }
    }
#pragma omp section
    {
#pragma omp critical
      {
        total = total + part;
      }
    }
  }
  expected = 3 * 11 + 7;
  return (expected == total);
}
/* Cross-check variant: identical to the firstprivate test except the
 * clause is private(part), so under OpenMP each section's copy of part
 * is deliberately left uninitialized and the sum should NOT reliably
 * equal 7 + 3 * 11.  (Without OpenMP the pragmas are ignored and the
 * serial result matches.) */
int
crosscheck_parallel_section_firstprivate (FILE * logFile)
{
  int total = 7;
  int part = 11;
  int expected;
#pragma omp parallel sections private(part)
  {
#pragma omp section
    {
#pragma omp critical
      {
        total = total + part;
      }
    }
#pragma omp section
    {
#pragma omp critical
      {
        total = total + part;
      }
    }
#pragma omp section
    {
#pragma omp critical
      {
        total = total + part;
      }
    }
  }
  expected = 3 * 11 + 7;
  return (expected == total);
}
#else
#warning "Not tested on 64 bit systems"
#endif
|
thdat.c | /*
* Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the
* following conditions are met:
*
* 1. Redistributions of source code must retain this list
* of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce this
* list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thtk/thtk.h>
#include "program.h"
#include "util.h"
#include "mygetopt.h"
/* Print command-line usage for the archiver to stdout.  argv0 and
 * PACKAGE_BUGREPORT come from program.h / config.h. */
static void
print_usage(
    void)
{
    printf("Usage: %s [-V] [[-c | -l | -x] VERSION] [ARCHIVE [FILE...]]\n"
           "Options:\n"
           "  -c  create an archive\n"
           "  -l  list the contents of an archive\n"
           "  -x  extract an archive\n"
           "  -V  display version information and exit\n"
           "VERSION can be:\n"
           "  1, 2, 3, 4, 5, 6, 7, 8, 9, 95, 10, 103 (for Uwabami Breakers), 105, 11, 12, 123, 125, 128, 13, 14, 143, 15, 16, 165, 17 or 18\n"
           /* NEWHU: */
           "Specify 'd' as VERSION to automatically detect archive format. (-l and -x only)\n\n"
           "Report bugs to <" PACKAGE_BUGREPORT ">.\n", argv0);
}
/* Report a thtk error on stderr, prefixed with the program name. */
static void
print_error(
    thtk_error_t* error)
{
    fprintf(stderr, "%s:%s\n", argv0, thtk_error_message(error));
}
/* Pairs an open archive handle with the stream it reads from, so both
 * can be created and torn down together. */
typedef struct {
    thdat_t* thdat;    /* parsed archive handle (owned) */
    thtk_io_t* stream; /* underlying file stream (owned) */
} thdat_state_t;
/* Allocate an empty thdat_state_t with both members cleared.
 * Returns NULL on allocation failure (the original dereferenced the
 * result of malloc without checking it). */
static thdat_state_t*
thdat_state_alloc(void)
{
    thdat_state_t* state = malloc(sizeof(*state));
    if (state) {
        state->thdat = NULL;
        state->stream = NULL;
    }
    return state;
}
/* NULL-safe teardown of a thdat_state_t: release the archive handle
 * and the underlying stream when present, then the wrapper itself. */
static void
thdat_state_free(
    thdat_state_t* state)
{
    if (!state)
        return;
    if (state->thdat)
        thdat_free(state->thdat);
    if (state->stream)
        thtk_io_close(state->stream);
    free(state);
}
/* Open the archive at PATH as format VERSION for reading.
 * Returns a fully initialized state, or NULL with *ERROR set (or on
 * out-of-memory), releasing any partially acquired resources.
 * Fix: handle a NULL return from the allocator instead of
 * dereferencing it. */
static thdat_state_t*
thdat_open_file(
    unsigned int version,
    const char* path,
    thtk_error_t** error)
{
    thdat_state_t* state = thdat_state_alloc();
    if (!state)
        return NULL;
    if (!(state->stream = thtk_io_open_file(path, "rb", error))) {
        thdat_state_free(state);
        return NULL;
    }
    if (!(state->thdat = thdat_open(version, state->stream, error))) {
        thdat_state_free(state);
        return NULL;
    }
    return state;
}
/* Extract entry `entry_index` from the archive into a file of the same
 * name, creating intermediate directories first (needed for th105, whose
 * entry names contain paths). Prints the entry name on success.
 * Returns 1 on success, 0 with *error set. */
static int
thdat_extract_file(
    thdat_state_t* state,
    size_t entry_index,
    thtk_error_t** error)
{
    const char* entry_name = thdat_entry_get_name(state->thdat, entry_index, error);
    thtk_io_t* entry_stream;
    int ok;

    if (!entry_name)
        return 0;
    /* For th105: make sure that the directory exists. */
    util_makepath(entry_name);
    entry_stream = thtk_io_open_file(entry_name, "wb", error);
    if (!entry_stream)
        return 0;
    ok = thdat_entry_read_data(state->thdat, entry_index, entry_stream, error) != -1;
    if (ok)
        printf("%s\n", entry_name);
    thtk_io_close(entry_stream);
    return ok;
}
/* List the contents of the archive at `path` (format `version`) as a
 * "Name / Size [/ Stored]" table on stdout.
 * Returns 1 on success, 0 with *error set. */
static int
thdat_list(
    unsigned int version,
    const char* path,
    thtk_error_t** error)
{
    thdat_state_t* state = thdat_open_file(version, path, error);
    if(!state) {
        return 0;
    }
    ssize_t entry_count;
    /* One row per entry; `name` points into thdat-owned storage, so it is
     * not freed here. */
    struct {
        const char* name;
        ssize_t size;
        ssize_t zsize;
    }* entries;
    ssize_t e;
    int name_width = 4; /* never narrower than strlen("Name") */
    if ((entry_count = thdat_entry_count(state->thdat, error)) == -1) {
        thdat_state_free(state);
        return 0;
    }
    /* NOTE(review): malloc result is unchecked, and rows whose metadata
     * lookup failed below keep an indeterminate name that is still printed
     * by the loop at the end -- confirm intended behavior upstream. */
    entries = malloc(entry_count * sizeof(*entries));
    /* The commented-out reduction is emulated with a critical section
     * because older OpenMP implementations lack reduction(max:...). */
#pragma omp parallel /* reduction(max:name_width) */
    {
#pragma omp for
        for (e = 0; e < entry_count; ++e) {
            thtk_error_t* error = NULL; /* thread-local, shadows the parameter */
            entries[e].name = thdat_entry_get_name(state->thdat, e, &error);
            entries[e].size = thdat_entry_get_size(state->thdat, e, &error);
            entries[e].zsize = thdat_entry_get_zsize(state->thdat, e, &error);
            if (!entries[e].name || entries[e].size == -1 || entries[e].zsize == -1) {
                print_error(error);
                thtk_error_free(&error);
                continue;
            }
            int entry_name_width = strlen(entries[e].name);
#pragma omp critical
            if (entry_name_width > name_width)
                name_width = entry_name_width;
        }
    }
    /* th105/th123: stored size equals plain size, so omit the column. */
    if (version == 105 || version == 123)
        printf("%-*s %7s\n", name_width, "Name", "Size");
    else
        printf("%-*s %7s %7s\n", name_width, "Name", "Size", "Stored");
    for (e = 0; e < entry_count; ++e) {
        if (version == 105 || version == 123)
            printf("%-*s %7zd\n", name_width, entries[e].name, entries[e].size);
        else
            printf("%-*s %7zd %7zd\n", name_width, entries[e].name, entries[e].size, entries[e].zsize);
    }
    free(entries);
    thdat_state_free(state);
    return 1;
}
/* Create the archive `path` (format `version`) from the `entry_count`
 * input paths in `paths`. Directories are expanded to the files inside
 * them. Returns 1 on success, 0 if finalizing the archive failed; exits
 * the process on unrecoverable setup errors (matching original behavior).
 *
 * BUGFIX: the per-entry input stream used to leak whenever seeking or
 * writing failed (the error paths `continue`d without thtk_io_close), and
 * realpaths[i]/entries[i][j] leaked on several error paths. All resources
 * are now released on every path; behavior is otherwise unchanged. */
static int
thdat_create_wrapper(
    unsigned int version,
    const char* path,
    const char** paths,
    size_t entry_count,
    thtk_error_t** error)
{
    thdat_state_t* state = thdat_state_alloc();
    char*** entries = calloc(entry_count, sizeof(char**));
    char** realpaths;
    int* entries_count = calloc(entry_count, sizeof(int));
    size_t real_entry_count = 0;

    if (!(state->stream = thtk_io_open_file(path, "wb", error))) {
        thdat_state_free(state);
        exit(1);
    }

    /* Expand each argument: a directory becomes the list of files inside
     * it, anything else is taken verbatim as a single entry. */
    for (size_t i = 0; i < entry_count; i++) {
        int n = util_scan_files(paths[i], &entries[i]);
        if (n == -1) {
            entries[i] = calloc(1, sizeof(char*));
            entries[i][0] = malloc(strlen(paths[i]) + 1);
            strcpy(entries[i][0], paths[i]);
            n = 1;
        }
        entries_count[i] = n;
        real_entry_count += n;
    }

    if (!(state->thdat = thdat_create(version, state->stream, real_entry_count, error))) {
        thdat_state_free(state);
        exit(1);
    }

    /* Set entry names first... */
    realpaths = calloc(real_entry_count, sizeof(char*));
    size_t k = 0;
    for (size_t i = 0; i < entry_count; ++i) {
        thtk_error_t* error = NULL; /* per-entry error, shadows the parameter */
        for (int j = 0; j < entries_count[i]; j++) {
            if (!thdat_entry_set_name(state->thdat, k, entries[i][j], &error)) {
                print_error(error);
                thtk_error_free(&error);
                /* k is not advanced, so realpaths keeps a NULL slot at the
                 * tail; the write loop below skips those slots. */
                free(entries[i][j]);
                continue;
            }
            realpaths[k] = malloc(strlen(entries[i][j]) + 1);
            strcpy(realpaths[k], entries[i][j]);
            k++;
            free(entries[i][j]);
        }
        free(entries[i]);
    }
    free(entries);
    free(entries_count);

    /* ...and then module->create, if this is a th105 archive. This is
     * because the list of entries comes first in th105 archives. */
    if (version == 105 || version == 123) {
        if (!thdat_init(state->thdat, error)) {
            thdat_state_free(state);
            exit(1);
        }
    }

    /* TODO: Properly indicate when insertion fails. */
    ssize_t i;
#pragma omp parallel for schedule(dynamic)
    for (i = 0; i < (ssize_t)real_entry_count; ++i) {
        thtk_error_t* error = NULL;
        thtk_io_t* entry_stream;
        off_t entry_size;
        printf("%s...\n", thdat_entry_get_name(state->thdat, i, &error));
        /* Skip slots whose name could not be set above. */
        if (!realpaths[i] || !(thdat_entry_get_name(state->thdat, i, &error))[0]) {
            free(realpaths[i]);
            continue;
        }
        if (!(entry_stream = thtk_io_open_file(realpaths[i], "rb", &error))) {
            print_error(error);
            thtk_error_free(&error);
            free(realpaths[i]);
            continue;
        }
        /* Measure the file, rewind, and stream it into the archive. */
        if ((entry_size = thtk_io_seek(entry_stream, 0, SEEK_END, &error)) == -1 ||
            thtk_io_seek(entry_stream, 0, SEEK_SET, &error) == -1 ||
            thdat_entry_write_data(state->thdat, i, entry_stream, entry_size, &error) == -1) {
            print_error(error);
            thtk_error_free(&error);
        }
        thtk_io_close(entry_stream);
        free(realpaths[i]);
    }
    free(realpaths);

    int ret = 1;
    if (!thdat_close(state->thdat, error))
        ret = 0;
    thdat_state_free(state);
    return ret;
}
/* TODO: Make sure errors are printed in all cases. */
/* Entry point: parse options, optionally auto-detect the archive version,
 * then dispatch on the selected mode:
 *   'c' create, 'l' list, 'x' extract, 'd' detect-only. */
int
main(
    int argc,
    char* argv[])
{
    thtk_error_t* error = NULL;
    unsigned int version = 0;
    int mode = -1; /* -1 = no mode chosen yet */
    argv0 = util_shortname(argv[0]);
    int opt;
    int ind=0;
    while(argv[util_optind]) {
        switch(opt = util_getopt(argc, argv, ":c:l:x:Vd")) {
        case 'c':
        case 'l':
        case 'x':
        case 'd':
            if(mode != -1) {
                fprintf(stderr,"%s: More than one mode specified\n",argv0);
                print_usage();
                exit(1);
            }
            mode = opt;
            /* "VERSION = d" requests auto-detection; only meaningful for
             * list/extract. ~0 is the sentinel for "detect later". */
            if((opt == 'x' || mode == 'l') && *util_optarg == 'd') {
                version = ~0;
            }
            else if(opt != 'd') version = parse_version(util_optarg);
            break;
        default:
            /* Handles -V, unknown options, and collects non-option args. */
            util_getopt_default(&ind,argv,opt,print_usage);
        }
    }
    argc = ind;
    argv[argc] = NULL; /* argv now holds only the positional arguments */
    /* detect version */
    if(argc && (mode == 'x' || mode == 'l') && version == ~0) {
        thtk_io_t* file;
        if(!(file = thtk_io_open_file(argv[0], "rb", &error))) {
            print_error(error);
            thtk_error_free(&error);
            exit(1);
        }
        uint32_t out[4];
        unsigned int heur;
        printf("Detecting '%s'...\n",argv[0]);
        if(-1 == thdat_detect(argv[0], file, out, &heur, &error)) {
            thtk_io_close(file);
            print_error(error);
            thtk_error_free(&error);
            exit(1);
        }
        /* heur == -1 means the detector found no unique match; list the
         * remaining candidates and bail out. */
        if(heur == -1) {
            const thdat_detect_entry_t* ent;
            printf("Couldn't detect version!\nPossible versions: ");
            while((ent = thdat_detect_iter(out))) {
                printf("%d,",ent->alias);
            }
            printf("\n");
            thtk_io_close(file);
            exit(1);
        }
        else {
            printf("Detected version %d\n",heur);
            version = heur;
        }
        thtk_io_close(file);
    }
    switch (mode) {
    case 'd': {
        /* Detect-only mode: report best guess, candidate set, and the
         * filename-based guess for every given archive. */
        if (argc < 1) {
            print_usage();
            exit(1);
        }
        for (int i = 0; i < argc; i++) {
            thtk_io_t* file;
            if (!(file = thtk_io_open_file(argv[i], "rb", &error))) {
                print_error(error);
                thtk_error_free(&error);
                exit(1);
            }
            uint32_t out[4];
            unsigned int heur;
            printf("Detecting '%s'... ",argv[i]);
            if (-1 == thdat_detect(argv[i], file, out, &heur, &error)) {
                printf("\n");
                thtk_io_close(file);
                print_error(error);
                thtk_error_free(&error);
                continue;
            }
            const thdat_detect_entry_t* ent;
            printf("%d | possible versions: ", heur);
            while((ent = thdat_detect_iter(out))) {
                printf("%d,",ent->alias);
            }
            printf(" | filename: %d\n", thdat_detect_filename(argv[i]));
            thtk_io_close(file);
        }
        exit(0);
    }
    case 'l': {
        if (argc < 1) {
            print_usage();
            exit(1);
        }
        if (!thdat_list(version, argv[0], &error)) {
            print_error(error);
            thtk_error_free(&error);
            exit(1);
        }
        exit(0);
    }
    case 'c': {
        /* Needs the archive name plus at least one input path. */
        if (argc < 2) {
            print_usage();
            exit(1);
        }
        if (!thdat_create_wrapper(version, argv[0], (const char**)&argv[1], argc - 1, &error)) {
            print_error(error);
            thtk_error_free(&error);
            exit(1);
        }
        exit(0);
    }
    case 'x': {
        if (argc < 1) {
            print_usage();
            exit(1);
        }
        thdat_state_t* state = thdat_open_file(version, argv[0], &error);
        if (!state) {
            print_error(error);
            thtk_error_free(&error);
            exit(1);
        }
        if (argc > 1) {
            /* Extract only the explicitly named entries. */
            ssize_t a;
#pragma omp parallel for schedule(dynamic)
            for (a = 1; a < argc; ++a) {
                thtk_error_t* error = NULL; /* thread-local */
                int entry_index;
                if ((entry_index = thdat_entry_by_name(state->thdat, argv[a], &error)) == -1) {
                    print_error(error);
                    thtk_error_free(&error);
                    continue;
                }
                if (!thdat_extract_file(state, entry_index, &error)) {
                    print_error(error);
                    thtk_error_free(&error);
                    continue;
                }
            }
        } else {
            /* No names given: extract every entry in the archive. */
            ssize_t entry_count;
            if ((entry_count = thdat_entry_count(state->thdat, &error)) == -1) {
                print_error(error);
                thtk_error_free(&error);
                exit(1);
            }
            ssize_t entry_index;
#pragma omp parallel for schedule(dynamic)
            for (entry_index = 0; entry_index < entry_count; ++entry_index) {
                thtk_error_t* error = NULL; /* thread-local */
                if (!thdat_extract_file(state, entry_index, &error)) {
                    print_error(error);
                    thtk_error_free(&error);
                    continue;
                }
            }
        }
        thdat_state_free(state);
        exit(0);
    }
    default:
        print_usage();
        exit(1);
    }
}
|
mscash1_fmt_plug.c | /* MSCASH patch for john (performance improvement)
*
* Modified for utf-8 support by magnum in 2011, same terms as below
*
* Written by Alain Espinosa <alainesp at gmail.com> in 2007. No copyright
* is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the
* public domain is deemed null and void, then the software is
* Copyright (c) 2007 Alain Espinosa and it is hereby released to the
* general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*
* (This is a heavily cut-down "BSD license".)
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mscash;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mscash);
#else
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "unicode.h"
#include "options.h"
#include "loader.h"
#include "johnswap.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "memdbg.h"
/* Format identity and benchmarking parameters for the JtR format table. */
#define FORMAT_LABEL "mscash"
#define FORMAT_NAME "MS Cache Hash (DCC)"
#define ALGORITHM_NAME "MD4 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 27
#define MAX_CIPHERTEXT_LENGTH (2 + 19*3 + 1 + 32) // x3 because salt may be UTF-8 in input
/* Note: some tests will be replaced in init() if running UTF-8 */
static struct fmt_tests tests[] = {
    {"176a4c2bd45ac73687676c2f09045353", "", {"root"} }, // nullstring password
    {"M$test2#ab60bdb4493822b175486810ac2abe63", "test2" },
    {"M$test1#64cd29e36a8431a2b111378564a10631", "test1" },
    {"M$test1#64cd29e36a8431a2b111378564a10631", "test1" },
    {"M$test1#64cd29e36a8431a2b111378564a10631", "test1" },
    {"M$test3#14dd041848e12fc48c0aa7a416a4a00c", "test3" },
    {"M$test4#b945d24866af4b01a6d89b9d932a153c", "test4" },
    {"64cd29e36a8431a2b111378564a10631", "test1", {"TEST1"} }, // salt is lowercased before hashing
    {"290efa10307e36a79b3eebf2a6b29455", "okolada", {"nineteen_characters"} }, // max salt length
    {"ab60bdb4493822b175486810ac2abe63", "test2", {"test2"} },
    {"b945d24866af4b01a6d89b9d932a153c", "test4", {"test4"} },
    {NULL}
};
/* Binary (raw MD4) and salt sizes/alignment as stored by this format. */
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE (11*4)
#define SALT_ALIGN 4
/* Key-batch sizing: larger batches when OpenMP is available. */
#define OK_NUM_KEYS 64
#define BEST_NUM_KEYS 512
#ifdef _OPENMP
#define MS_NUM_KEYS (OK_NUM_KEYS * 96)
#else
#define MS_NUM_KEYS BEST_NUM_KEYS
#endif
#define MIN_KEYS_PER_CRYPT OK_NUM_KEYS
#define MAX_KEYS_PER_CRYPT MS_NUM_KEYS
/* Working buffers, all sized per max_keys_per_crypt in init():
 *   ms_buffer1x - 16 words per key: the padded MD4 message block
 *   output1x    - 4 words per key: final DCC hash output
 *   crypt_out   - 4 words per key: intermediate NT hash of the password
 *   last        - 4 words per key: partially computed second MD4 state
 *   last_i      - 1 word per key: previous key length (for buffer cleaning)
 *   salt_buffer - the active salt set by set_salt() (borrowed pointer) */
static unsigned int *ms_buffer1x;
static unsigned int *output1x;
static unsigned int *crypt_out;
static unsigned int *last;
static unsigned int *last_i;
static unsigned int *salt_buffer;
static unsigned int new_key; /* set when any key changed; crypt_all redoes nt_hash */
//Init values
#define INIT_A 0x67452301
#define INIT_B 0xefcdab89
#define INIT_C 0x98badcfe
#define INIT_D 0x10325476
#define SQRT_2 0x5a827999
#define SQRT_3 0x6ed9eba1
/* Forward declarations: encoding-specific variants installed by init(). */
static void set_key_utf8(char *_key, int index);
static void set_key_encoding(char *_key, int index);
static void * get_salt_utf8(char *_ciphertext);
static void * get_salt_encoding(char *_ciphertext);
struct fmt_main fmt_mscash;
#if !ARCH_LITTLE_ENDIAN
/* Byte-swap `count` 32-bit words in place (big-endian hosts only). */
static inline void swap(unsigned int *x, int count)
{
    int i;
    for (i = 0; i < count; i++)
        x[i] = JOHNSWAP(x[i]);
}
#endif
/* One-time format setup: size the key batch for the OpenMP thread count,
 * allocate the working buffers, and patch the method table / self-tests
 * for the configured target encoding. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
    /* Round min/max keys per crypt to multiples of the thread count. */
    int n = omp_get_max_threads(), nmin, nmax;
    if (n < 1)
        n = 1;
    nmin = OK_NUM_KEYS - (OK_NUM_KEYS % n);
    if (nmin < n)
        nmin = n;
    fmt_mscash.params.min_keys_per_crypt = nmin;
    nmax = n * BEST_NUM_KEYS;
    if (nmax > MS_NUM_KEYS)
        nmax = MS_NUM_KEYS;
    fmt_mscash.params.max_keys_per_crypt = nmax;
#endif
    ms_buffer1x = mem_calloc_tiny(sizeof(ms_buffer1x[0]) * 16*fmt_mscash.params.max_keys_per_crypt, MEM_ALIGN_WORD);
    output1x = mem_calloc_tiny(sizeof(output1x[0]) * 4*fmt_mscash.params.max_keys_per_crypt, MEM_ALIGN_WORD);
    crypt_out = mem_calloc_tiny(sizeof(crypt_out[0]) * 4*fmt_mscash.params.max_keys_per_crypt, MEM_ALIGN_WORD);
    last = mem_calloc_tiny(sizeof(last[0]) * 4*fmt_mscash.params.max_keys_per_crypt, MEM_ALIGN_WORD);
    last_i = mem_calloc_tiny(sizeof(last_i[0]) * fmt_mscash.params.max_keys_per_crypt, MEM_ALIGN_WORD);
    new_key=1;
    /* Install encoding-specific key/salt handling and swap in test
     * vectors that match the chosen encoding. */
    if (pers_opts.target_enc == UTF_8) {
        fmt_mscash.methods.set_key = set_key_utf8;
        fmt_mscash.methods.salt = get_salt_utf8;
        fmt_mscash.params.plaintext_length = (PLAINTEXT_LENGTH * 3);
        tests[1].ciphertext = "M$\xC3\xBC#48f84e6f73d6d5305f6558a33fa2c9bb";
        tests[1].plaintext = "\xC3\xBC"; // German u-umlaut in UTF-8
        tests[2].ciphertext = "M$user#9121790702dda0fa5d353014c334c2ce";
        tests[2].plaintext = "\xe2\x82\xac\xe2\x82\xac"; // 2 x Euro signs
    } else if (pers_opts.target_enc == ASCII || pers_opts.target_enc == ISO_8859_1) {
        tests[1].ciphertext = "M$\xFC#48f84e6f73d6d5305f6558a33fa2c9bb";
        tests[1].plaintext = "\xFC"; // German u-umlaut in UTF-8
        tests[2].ciphertext = "M$\xFC\xFC#593246a8335cf0261799bda2a2a9c623";
        tests[2].plaintext = "\xFC\xFC"; // 2 x Euro signs
    } else {
        /* Other single/multi-byte codepages go through enc_to_utf16. */
        fmt_mscash.methods.set_key = set_key_encoding;
        fmt_mscash.methods.salt = get_salt_encoding;
    }
}
/* Canonicalize a ciphertext: copy it (bounded) into a static buffer and
 * lowercase everything after the "M$" tag, salt included, in an
 * encoding-aware manner. Returns the static buffer. */
static char * ms_split(char *ciphertext, int index, struct fmt_main *self)
{
    static char out[MAX_CIPHERTEXT_LENGTH + 1];
    int n = 0;

    while (n < MAX_CIPHERTEXT_LENGTH && ciphertext[n] != 0) {
        out[n] = ciphertext[n];
        n++;
    }
    out[n] = 0;
    // lowercase salt as well as hash, encoding-aware
    enc_strlwr(&out[2]);
    return out;
}
/* Validate a canonical "M$salt#32hexdigits" ciphertext.
 * Returns 1 if the line is parseable by this format, else 0. */
static int valid(char *ciphertext, struct fmt_main *self)
{
    unsigned int i;
    unsigned int l;
    char insalt[3*19+1];  /* salt bytes; may be UTF-8, hence x3 */
    UTF16 realsalt[21];
    int saltlen;
    if (strncmp(ciphertext, "M$", 2))
        return 0;
    l = strlen(ciphertext);
    if (l <= 32 || l > MAX_CIPHERTEXT_LENGTH)
        return 0;
    l -= 32;  /* l now indexes just past the '#' separator */
    if(ciphertext[l-1]!='#')
        return 0;
    /* The 32 trailing characters must all be hex digits. */
    for (i = l; i < l + 32; i++)
        if (atoi16[ARCH_INDEX(ciphertext[i])] == 0x7F)
            return 0;
    // This is tricky: Max supported salt length is 19 characters of Unicode
    saltlen = enc_to_utf16(realsalt, 20, (UTF8*)strnzcpy(insalt, &ciphertext[2], l - 2), l - 3);
    if (saltlen < 0 || saltlen > 19) {
        static int warned = 0;
        /* Warn once, but stay quiet when reading back the pot file. */
        if (!ldr_in_pot)
            if (!warned++)
                fprintf(stderr, "%s: One or more hashes rejected due to salt length limitation\n", FORMAT_LABEL);
        return 0;
    }
    return 1;
}
/* Build a canonical "M$login#hash" line from a split pot/passwd entry.
 * split_fields[0] is the login (used as salt), split_fields[1] the hash.
 * Returns a newly built canonical string when conversion applies and
 * validates, otherwise returns split_fields[1] unchanged.
 *
 * BUGFIX: split_fields[0] is now checked for NULL before split_fields[1]
 * is inspected, and the duplicated (dead) NULL check has been removed. */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
    char *cp;
    int i;

    /* Nothing to do without a login field, or if the hash is already in
     * canonical "M$..." form. */
    if (!split_fields[0] || !strncmp(split_fields[1], "M$", 2))
        return split_fields[1];
    // ONLY check, if this string split_fields[1], is ONLY a 32 byte hex string.
    for (i = 0; i < 32; i++)
        if (atoi16[ARCH_INDEX(split_fields[1][i])] == 0x7F)
            return split_fields[1];
    cp = mem_alloc(strlen(split_fields[0]) + strlen(split_fields[1]) + 4);
    sprintf(cp, "M$%s#%s", split_fields[0], split_fields[1]);
    if (valid(cp, self))
    {
        char *cipher = str_alloc_copy(cp);
        MEM_FREE(cp);
        return cipher;
    }
    MEM_FREE(cp);
    return split_fields[1];
}
/* Remember the active salt (11 packed words, see get_salt) for crypt_all.
 * The pointer is borrowed; the caller owns the storage. */
static void set_salt(void *salt)
{
    salt_buffer = (unsigned int*)salt;
}
/* Pack the salt (the text between "M$" and '#') into the 11-word layout
 * the MD4 code expects: UTF-16LE characters two per word, terminated by
 * a 0x80 padding marker, with word 10 holding the MD4 bit length. */
static void *get_salt(char *_ciphertext)
{
    unsigned char *ciphertext = (unsigned char *)_ciphertext;
    // length=11 for save memory
    // position 10 = length
    // 0-9 = 1-19 Unicode characters + EOS marker (0x80)
    static unsigned int *out=0;
    unsigned int md4_size;
    if (!out) out = mem_alloc_tiny(11*sizeof(unsigned int), MEM_ALIGN_WORD);
    memset(out,0,11*sizeof(unsigned int));
    ciphertext+=2;  /* skip the "M$" tag */
    /* Two salt bytes are widened and packed per 32-bit word; the 0x80
     * end marker lands in the high or low half depending on parity. */
    for(md4_size = 0 ;; md4_size++)
        if(md4_size < 19 && ciphertext[md4_size]!='#')
        {
            md4_size++;
            out[md4_size>>1] = ciphertext[md4_size-1] | ((ciphertext[md4_size]!='#') ? (ciphertext[md4_size]<<16) : 0x800000);
            if(ciphertext[md4_size]=='#')
                break;
        }
        else
        {
            out[md4_size>>1] = 0x80;
            break;
        }
    /* MD4 message length in bits: (16-byte NT hash + 2*md4_size) << 3,
     * expressed here as (8 + md4_size) << 4. */
    out[10] = (8 + md4_size) << 4;
    // dump_stuff(out, 44);
    return out;
}
/* Codepage variant of get_salt(): convert the salt bytes to UTF-16 via
 * the configured encoding before packing into the 11-word layout. */
static void *get_salt_encoding(char *_ciphertext) {
    unsigned char *ciphertext = (unsigned char *)_ciphertext;
    unsigned char input[19*3+1];
    int utf16len, md4_size;
    static UTF16 *out=0;
    if (!out) out = mem_alloc_tiny(22*sizeof(UTF16), MEM_ALIGN_WORD);
    memset(out, 0, 22*sizeof(UTF16));
    ciphertext += 2;  /* skip the "M$" tag */
    /* Copy raw salt bytes up to the '#' separator. */
    for (md4_size=0;md4_size<sizeof(input)-1;md4_size++) {
        if (ciphertext[md4_size] == '#')
            break;
        input[md4_size] = ciphertext[md4_size];
    }
    input[md4_size] = 0;
    utf16len = enc_to_utf16(out, 19, input, md4_size);
    if (utf16len < 0)  /* conversion error: use what was converted */
        utf16len = strlen16(out);
#if ARCH_LITTLE_ENDIAN
    out[utf16len] = 0x80;
#else
    out[utf16len] = 0x8000;
    /* NOTE(review): the swap count is derived from md4_size (input byte
     * count), not utf16len; for multi-byte input these differ -- confirm
     * against upstream before changing big-endian behavior. */
    swap((unsigned int*)out, (md4_size>>1)+1);
#endif
    /* Word 10 = MD4 bit length, as in get_salt(). */
    ((unsigned int*)out)[10] = (8 + utf16len) << 4;
    // dump_stuff(out, 44);
    return out;
}
/* UTF-8 variant of get_salt(): decode the salt to UTF-16 first, then pack
 * it into the same 11-word layout (two UTF-16 units per word, 0x80 end
 * marker, bit length in word 10). */
static void * get_salt_utf8(char *_ciphertext)
{
    unsigned char *ciphertext = (unsigned char *)_ciphertext;
    unsigned int md4_size;
    UTF16 ciphertext_utf16[21];
    int len;
    static ARCH_WORD_32 *out=0;
    if (!out) out = mem_alloc_tiny(11*sizeof(ARCH_WORD_32), MEM_ALIGN_WORD);
    memset(out, 0, 11*sizeof(ARCH_WORD_32));
    ciphertext+=2;  /* skip the "M$" tag */
    len = ((unsigned char*)strchr((char*)ciphertext, '#')) - ciphertext;
    /* len+1 so the '#' terminator is converted too and acts as sentinel. */
    utf8_to_utf16(ciphertext_utf16, 20, ciphertext, len+1);
    for(md4_size = 0 ;; md4_size++) {
#if !ARCH_LITTLE_ENDIAN
        /* utf8_to_utf16 produced native-endian units; store little-endian. */
        ciphertext_utf16[md4_size] = (ciphertext_utf16[md4_size]>>8)|(ciphertext_utf16[md4_size]<<8);
#endif
        if(md4_size < 19 && ciphertext_utf16[md4_size]!=(UTF16)'#') {
            md4_size++;
#if !ARCH_LITTLE_ENDIAN
            ciphertext_utf16[md4_size] = (ciphertext_utf16[md4_size]>>8)|(ciphertext_utf16[md4_size]<<8);
#endif
            out[md4_size>>1] = ciphertext_utf16[md4_size-1] |
                ((ciphertext_utf16[md4_size]!=(UTF16)'#') ?
                 (ciphertext_utf16[md4_size]<<16) : 0x800000);
            if(ciphertext_utf16[md4_size]==(UTF16)'#')
                break;
        }
        else {
            out[md4_size>>1] = 0x80;
            break;
        }
    }
    /* Word 10 = MD4 bit length, as in get_salt(). */
    out[10] = (8 + md4_size) << 4;
    return out;
}
/* Decode the 32 hex digits into 4 little-endian words, then run the final
 * MD4 steps *backwards* (undo the state addition and the last round-3
 * steps that depend only on the salt) so cmp_one/crypt_all can compare
 * partially computed hashes. */
static void *get_binary(char *ciphertext)
{
    static unsigned int out[BINARY_SIZE/sizeof(unsigned int)];
    unsigned int i=0;
    unsigned int temp;
    unsigned int *salt=fmt_mscash.methods.salt(ciphertext);
    for(;ciphertext[0]!='#';ciphertext++);  /* seek past the salt */
    ciphertext++;
    /* Each output word is assembled from 8 hex digits in the byte order
     * MD4 stores them (little-endian within the word). */
    for(; i<4 ;i++)
    {
        temp  = (atoi16[ARCH_INDEX(ciphertext[i*8+0])])<<4;
        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+1])]);
        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+2])])<<12;
        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+3])])<<8;
        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+4])])<<20;
        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+5])])<<16;
        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+6])])<<28;
        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+7])])<<24;
        out[i]=temp;
    }
    /* Undo the final state addition. */
    out[0] -= INIT_A;
    out[1] -= INIT_B;
    out[2] -= INIT_C;
    out[3] -= INIT_D;
    // Reversed b += (c ^ d ^ a) + salt_buffer[11] + SQRT_3; b = (b << 15) | (b >> 17);
    out[1]  = (out[1] >> 15) | (out[1] << 17);
    out[1] -= SQRT_3 + (out[2] ^ out[3] ^ out[0]);
    // Reversed c += (d ^ a ^ b) + salt_buffer[3] + SQRT_3; c = (c << 11) | (c >> 21);
    out[2] = (out[2] << 21) | (out[2] >> 11);
    out[2]-= SQRT_3 + (out[3] ^ out[0] ^ out[1]) + salt[3];
    // Reversed d += (a ^ b ^ c) + salt_buffer[7] + SQRT_3; d = (d << 9 ) | (d >> 23);
    out[3] = (out[3] << 23) | (out[3] >> 9);
    out[3] -= SQRT_3 + (out[0] ^ out[1] ^ out[2]) + salt[7];
    /* NOTE(review): d is rotated/reversed a second time here, leaving it
     * one extra step back; cmp_one() re-applies that step (d+=SQRT_3 and
     * the rotate) before comparing -- the two must stay in sync. */
    //+ SQRT_3; d = (d << 9 ) | (d >> 23);
    out[3]=(out[3] << 23 ) | (out[3] >> 9);
    out[3]-=SQRT_3;
    return out;
}
static int binary_hash_0(void *binary) { return ((unsigned int*)binary)[3] & 0x0F; }
static int binary_hash_1(void *binary) { return ((unsigned int*)binary)[3] & 0xFF; }
static int binary_hash_2(void *binary) { return ((unsigned int*)binary)[3] & 0x0FFF; }
static int binary_hash_3(void *binary) { return ((unsigned int*)binary)[3] & 0x0FFFF; }
static int binary_hash_4(void *binary) { return ((unsigned int*)binary)[3] & 0x0FFFFF; }
static int binary_hash_5(void *binary) { return ((unsigned int*)binary)[3] & 0x0FFFFFF; }
static int binary_hash_6(void *binary) { return ((unsigned int*)binary)[3] & 0x07FFFFFF; }
/* Same bucket hashes, computed from candidate `index`'s output word. */
static unsigned int output_last_word(int index)
{
    return output1x[4*index+3];
}
static int get_hash_0(int index) { return output_last_word(index) & 0x0F; }
static int get_hash_1(int index) { return output_last_word(index) & 0xFF; }
static int get_hash_2(int index) { return output_last_word(index) & 0x0FFF; }
static int get_hash_3(int index) { return output_last_word(index) & 0x0FFFF; }
static int get_hash_4(int index) { return output_last_word(index) & 0x0FFFFF; }
static int get_hash_5(int index) { return output_last_word(index) & 0x0FFFFFF; }
static int get_hash_6(int index) { return output_last_word(index) & 0x07FFFFFF; }
/* Compute the NT hash (MD4 of the UTF-16 password) for the first `count`
 * key buffers into crypt_out, plus the first four steps of the *second*
 * MD4 (over NT-hash||salt) into `last`, since those steps do not depend
 * on the salt. Fully unrolled MD4 compression -- do not reorder. */
static void nt_hash(int count)
{
    int i;
#if MS_NUM_KEYS > 1 && defined(_OPENMP)
#pragma omp parallel for default(none) private(i) shared(count, ms_buffer1x, crypt_out, last)
#endif
    for (i = 0; i < count; i++)
    {
        unsigned int a;
        unsigned int b;
        unsigned int c;
        unsigned int d;

        /* Round 1 */
        /* The first steps are simplified: the initial state is constant,
         * so e.g. INIT_A + F(INIT_B,INIT_C,INIT_D) folds to 0xFFFFFFFF. */
        a = 0xFFFFFFFF + ms_buffer1x[16*i+0];a = (a << 3 ) | (a >> 29);
        d = INIT_D + (INIT_C ^ (a & 0x77777777)) + ms_buffer1x[16*i+1];d = (d << 7 ) | (d >> 25);
        c = INIT_C + (INIT_B ^ (d & (a ^ INIT_B)))+ ms_buffer1x[16*i+2];c = (c << 11) | (c >> 21);
        b = INIT_B + (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+3];b = (b << 19) | (b >> 13);
        a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+4] ;a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+5] ;d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+6] ;c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+7] ;b = (b << 19) | (b >> 13);
        a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+8] ;a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+9] ;d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+10] ;c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+11] ;b = (b << 19) | (b >> 13);
        a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+12] ;a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+13] ;d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+14] ;c = (c << 11) | (c >> 21);
        /* Word 15 (length high word) is always zero, hence elided. */
        b += (a ^ (c & (d ^ a)))/*+ms_buffer1x[16*i+15]*/;b = (b << 19) | (b >> 13);

        /* Round 2 */
        a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+0] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+4] + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+8] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+12] + SQRT_2; b = (b << 13) | (b >> 19);
        a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+1] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+5] + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+9] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+13] + SQRT_2; b = (b << 13) | (b >> 19);
        a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+2] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+6] + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+10] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+14] + SQRT_2; b = (b << 13) | (b >> 19);
        a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+3] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+7] + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+11] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a))/*+ms_buffer1x[16*i+15]*/+SQRT_2; b = (b << 13) | (b >> 19);

        /* Round 3 */
        a += (b ^ c ^ d) + ms_buffer1x[16*i+0] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + ms_buffer1x[16*i+8] + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + ms_buffer1x[16*i+4] + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + ms_buffer1x[16*i+12] + SQRT_3; b = (b << 15) | (b >> 17);
        a += (b ^ c ^ d) + ms_buffer1x[16*i+2] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + ms_buffer1x[16*i+10] + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + ms_buffer1x[16*i+6] + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + ms_buffer1x[16*i+14] + SQRT_3; b = (b << 15) | (b >> 17);
        a += (b ^ c ^ d) + ms_buffer1x[16*i+1] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + ms_buffer1x[16*i+9] + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + ms_buffer1x[16*i+5] + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + ms_buffer1x[16*i+13] + SQRT_3; b = (b << 15) | (b >> 17);
        a += (b ^ c ^ d) + ms_buffer1x[16*i+3] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + ms_buffer1x[16*i+11] + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + ms_buffer1x[16*i+7] + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) /*+ ms_buffer1x[16*i+15] */+ SQRT_3; b = (b << 15) | (b >> 17);

        /* NT hash = state + initial values. */
        crypt_out[4*i+0] = a + INIT_A;
        crypt_out[4*i+1] = b + INIT_B;
        crypt_out[4*i+2] = c + INIT_C;
        crypt_out[4*i+3] = d + INIT_D;

        //Another MD4_crypt for the salt
        /* Round 1 -- first four steps of the second MD4; they only read
         * the NT hash, so they are cached here and reused per-salt. */
        a= 0xFFFFFFFF +crypt_out[4*i+0]; a=(a<<3 )|(a>>29);
        d=INIT_D + ( INIT_C ^ ( a & 0x77777777)) +crypt_out[4*i+1]; d=(d<<7 )|(d>>25);
        c=INIT_C + ( INIT_B ^ ( d & ( a ^ INIT_B))) +crypt_out[4*i+2]; c=(c<<11)|(c>>21);
        b=INIT_B + ( a ^ ( c & ( d ^ a ))) +crypt_out[4*i+3]; b=(b<<19)|(b>>13);
        last[4*i+0]=a;
        last[4*i+1]=b;
        last[4*i+2]=c;
        last[4*i+3]=d;
    }
}
/* Finish the second MD4 (over NT-hash||salt) for `count` candidates,
 * resuming from the cached state in `last`. The final steps that
 * get_binary() reversed are omitted, so output1x holds the same
 * partially-reversed form the binaries are compared in. Unrolled and
 * order-critical -- do not restructure. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
    int count = *pcount;
    int i;

    /* Recompute NT hashes only when some key changed since last call. */
    if(new_key)
    {
        new_key=0;
        nt_hash(count);
    }

#if MS_NUM_KEYS > 1 && defined(_OPENMP)
#pragma omp parallel for default(none) private(i) shared(count, last, crypt_out, salt_buffer, output1x)
#endif
    for(i = 0; i < count; i++)
    {
        unsigned int a;
        unsigned int b;
        unsigned int c;
        unsigned int d;

        /* Resume round 1 at step 5 with the cached state. */
        a = last[4*i+0];
        b = last[4*i+1];
        c = last[4*i+2];
        d = last[4*i+3];

        a += (d ^ (b & (c ^ d))) + salt_buffer[0] ;a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + salt_buffer[1] ;d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + salt_buffer[2] ;c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a))) + salt_buffer[3] ;b = (b << 19) | (b >> 13);
        a += (d ^ (b & (c ^ d))) + salt_buffer[4] ;a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + salt_buffer[5] ;d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + salt_buffer[6] ;c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a))) + salt_buffer[7] ;b = (b << 19) | (b >> 13);
        a += (d ^ (b & (c ^ d))) + salt_buffer[8] ;a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + salt_buffer[9] ;d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + salt_buffer[10] ;c = (c << 11) | (c >> 21);
        /* salt_buffer[11] would be zero, hence elided. */
        b += (a ^ (c & (d ^ a)))/*+salt_buffer[11]*/;b = (b << 19) | (b >> 13);

        /* Round 2 */
        a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+0] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + salt_buffer[0] + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + salt_buffer[4] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + salt_buffer[8] + SQRT_2; b = (b << 13) | (b >> 19);
        a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+1] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + salt_buffer[1] + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + salt_buffer[5] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + salt_buffer[9] + SQRT_2; b = (b << 13) | (b >> 19);
        a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+2] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + salt_buffer[2] + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + salt_buffer[6] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + salt_buffer[10] + SQRT_2; b = (b << 13) | (b >> 19);
        a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+3] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + salt_buffer[3] + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + salt_buffer[7] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a))/*+ salt_buffer[11]*/+ SQRT_2; b = (b << 13) | (b >> 19);

        /* Round 3 -- stops early; the remaining steps were reversed out
         * of the binary by get_binary()/finished by cmp_one(). */
        a += (b ^ c ^ d) + crypt_out[4*i+0] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + salt_buffer[4] + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + salt_buffer[0] + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + salt_buffer[8] + SQRT_3; b = (b << 15) | (b >> 17);
        a += (b ^ c ^ d) + crypt_out[4*i+2] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + salt_buffer[6] + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + salt_buffer[2] + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + salt_buffer[10] + SQRT_3; b = (b << 15) | (b >> 17);
        a += (b ^ c ^ d) + crypt_out[4*i+1] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + salt_buffer[5];

        output1x[4*i+0]=a;
        output1x[4*i+1]=b;
        output1x[4*i+2]=c;
        output1x[4*i+3]=d;
    }
    return count;
}
/* Quick scan: does any of the `count` partial outputs match the binary's
 * d-word? Full verification happens in cmp_one/cmp_exact. */
static int cmp_all(void *binary, int count)
{
    const unsigned int d = ((unsigned int *)binary)[3];
    int i;

    for (i = 0; i < count; i++) {
        if (output1x[i*4+3] == d)
            return 1;
    }
    return 0;
}
/* Verify candidate `index` against a binary by replaying, one at a time,
 * the final MD4 steps that crypt_all() left out (get_binary() reversed
 * the same steps), bailing out on the first mismatching word. */
static int cmp_one(void * binary, int index)
{
    unsigned int *t=(unsigned int *)binary;
    unsigned int a=output1x[4*index+0];
    unsigned int b=output1x[4*index+1];
    unsigned int c=output1x[4*index+2];
    unsigned int d=output1x[4*index+3];

    if(d!=t[3])
        return 0;
    /* Finish the d step crypt_all() truncated (see get_binary's second
     * d reversal, which this mirrors). */
    d+=SQRT_3;d = (d << 9 ) | (d >> 23);

    c += (d ^ a ^ b) + salt_buffer[1] + SQRT_3; c = (c << 11) | (c >> 21);
    if(c!=t[2])
        return 0;

    b += (c ^ d ^ a) + salt_buffer[9] + SQRT_3; b = (b << 15) | (b >> 17);
    if(b!=t[1])
        return 0;

    a += (b ^ c ^ d) + crypt_out[4*index+3]+ SQRT_3; a = (a << 3 ) | (a >> 29);
    return (a==t[0]);
}
/* Final check for the (unrealistic) collision case: confirm the source
 * line's salt matches the salt the candidate was hashed with. */
static int cmp_exact(char *source, int index)
{
    unsigned int *salt = fmt_mscash.methods.salt(source);
    unsigned int i;

    for (i = 0; i < 11; i++) {
        if (salt[i] != salt_buffer[i])
            return 0;
    }
    return 1;
}
// This is common code for the SSE/MMX/generic variants of non-UTF8 set_key
/* Widen an 8-bit key to UTF-16LE directly into `keybuffer`, packing two
 * characters per 32-bit word, append the 0x80 MD4 padding marker, zero
 * the words the previous (possibly longer) key used, and store the MD4
 * bit length at `lenStoreOffset`. `xBuf` is the word stride (1 for the
 * scalar layout). `last_length` tracks the previous key's used words. */
static inline void set_key_helper(unsigned int * keybuffer,
    unsigned int xBuf,
    const unsigned char * key,
    unsigned int lenStoreOffset,
    unsigned int *last_length)
{
    unsigned int i=0;
    unsigned int md4_size=0;
    /* md4_size advances twice per iteration (here and via ++md4_size
     * below) -- one word consumes two key characters. */
    for(; key[md4_size] && md4_size < PLAINTEXT_LENGTH; i += xBuf, md4_size++)
    {
        unsigned int temp;
        if ((temp = key[++md4_size]))
        {
            keybuffer[i] = key[md4_size-1] | (temp << 16);
        }
        else
        {
            /* Odd length: the 0x80 marker shares the last word. */
            keybuffer[i] = key[md4_size-1] | 0x800000;
            goto key_cleaning;
        }
    }
    /* Even length: the marker gets a word of its own. */
    keybuffer[i] = 0x80;

key_cleaning:
    /* Zero out the tail the previous key occupied. */
    i += xBuf;
    for(;i <= *last_length; i += xBuf)
        keybuffer[i] = 0;

    *last_length = (md4_size >> 1)+1;
    /* MD4 message length in bits / 8 halved: md4_size chars * 16 bits. */
    keybuffer[lenStoreOffset] = md4_size << 4;
}
/* Plain 8-bit (ISO-8859-1/ASCII) set_key: stage the candidate into its
 * slot and mark the batch dirty so crypt_all recomputes NT hashes. */
static void set_key(char *_key, int index)
{
    new_key = 1;
    set_key_helper(&ms_buffer1x[index << 4], 1, (unsigned char *)_key, 14,
                   &last_i[index]);
}
// UTF-8 conversion right into key buffer
// This is common code for the SSE/MMX/generic variants
/* Decode a UTF-8 key straight into `keybuffer` as packed UTF-16LE (two
 * units per 32-bit word, 0x80 padding marker, bit length stored at
 * `lenStoreOffset`). Decodes two characters (chl = low half, chh = high
 * half) per loop so each iteration fills one word. On a truncated or
 * invalid sequence the buffer is abandoned and *lastlen is set to the
 * maximum so the next key fully overwrites it.
 *
 * BUGFIX: in the second decode switch (the chh character), case 3
 * accumulated the continuation byte into chl instead of chh -- a
 * copy-paste slip from the first switch that corrupted both characters
 * for 4-byte UTF-8 sequences in the high position. It now updates chh,
 * matching cases 2 and 1. */
static inline void set_key_helper_utf8(unsigned int * keybuffer, unsigned int xBuf,
    const UTF8 * source, unsigned int lenStoreOffset, unsigned int *lastlen)
{
    unsigned int *target = keybuffer;
    UTF32 chl, chh = 0x80;
    unsigned int outlen = 0;

    while (*source) {
        /* Decode the low (first) character of this word. */
        chl = *source;
        if (chl >= 0xC0) {
            unsigned int extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f];
            switch (extraBytesToRead) {
            case 3:
                ++source;
                if (*source) {
                    chl <<= 6;
                    chl += *source;
                } else {
                    *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                    return;
                }
                /* fall through */
            case 2:
                ++source;
                if (*source) {
                    chl <<= 6;
                    chl += *source;
                } else {
                    *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                    return;
                }
                /* fall through */
            case 1:
                ++source;
                if (*source) {
                    chl <<= 6;
                    chl += *source;
                } else {
                    *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                    return;
                }
                /* fall through */
            case 0:
                break;
            default:
                *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                return;
            }
            chl -= offsetsFromUTF8[extraBytesToRead];
        }
        source++;
        outlen++;
        if (chl > UNI_MAX_BMP) {
            /* Needs a surrogate pair; if there is no room for both
             * halves, pad and truncate here. */
            if (outlen == PLAINTEXT_LENGTH) {
                chh = 0x80;
                *target = (chh << 16) | chl;
                target += xBuf;
                *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                break;
            }
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_LOW_START (UTF32)0xDC00
            /* Split into high (chl) / low (chh) surrogates. */
            chl -= halfBase;
            chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);
            chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
            outlen++;
        } else if (*source && outlen < PLAINTEXT_LENGTH) {
            /* Decode the high (second) character of this word. */
            chh = *source;
            if (chh >= 0xC0) {
                unsigned int extraBytesToRead =
                    opt_trailingBytesUTF8[chh & 0x3f];
                switch (extraBytesToRead) {
                case 3:
                    ++source;
                    if (*source) {
                        chh <<= 6;   /* was chl: see BUGFIX above */
                        chh += *source;
                    } else {
                        *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                        return;
                    }
                    /* fall through */
                case 2:
                    ++source;
                    if (*source) {
                        chh <<= 6;
                        chh += *source;
                    } else {
                        *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                        return;
                    }
                    /* fall through */
                case 1:
                    ++source;
                    if (*source) {
                        chh <<= 6;
                        chh += *source;
                    } else {
                        *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                        return;
                    }
                    /* fall through */
                case 0:
                    break;
                default:
                    *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                    return;
                }
                chh -= offsetsFromUTF8[extraBytesToRead];
            }
            source++;
            outlen++;
        } else {
            /* Odd final length: pad the high half with the 0x80 marker. */
            chh = 0x80;
            *target = chh << 16 | chl;
            target += xBuf;
            break;
        }
        *target = chh << 16 | chl;
        target += xBuf;
    }

    /* Even length (or empty key): the marker needs its own word. */
    if (chh != 0x80 || outlen == 0) {
        *target = 0x80;
        target += xBuf;
    }

    /* Zero the tail left over from the previous (longer) key. */
    while(target < &keybuffer[*lastlen]) {
        *target = 0;
        target += xBuf;
    }

    *lastlen = ((outlen >> 1) + 1) * xBuf;
    keybuffer[lenStoreOffset] = outlen << 4;
}
/* Store a new plaintext candidate (UTF-8 input path) at slot 'index'. */
static void set_key_utf8(char *_key, int index)
{
	unsigned int *slot = &ms_buffer1x[index * 16];

	set_key_helper_utf8(slot, 1, (UTF8 *)_key, 14, &last_i[index]);
	/* flag that a fresh password candidate is pending */
	new_key = 1;
}
// This is common code for the SSE/MMX/generic variants of non-UTF8 non-ISO-8859-1 set_key
/* Convert 'key' from the configured legacy codepage to UTF-16 directly into
 * 'keybuffer'. keybuffer is packed two UTF-16 units per 32-bit word with
 * stride 'xBuf'; the bit-length tag (md4_size << 4) is written at
 * 'lenStoreOffset' and '*last_length' tracks how many words the previous key
 * dirtied so only those are re-zeroed. */
static inline void set_key_helper_encoding(unsigned int * keybuffer,
unsigned int xBuf,
const unsigned char * key,
unsigned int lenStoreOffset,
unsigned int *last_length)
{
unsigned int i=0;
int md4_size;
/* enc_to_utf16 returns the length in UTF-16 units, or negative on
 * truncation -- in that case measure what actually got converted. */
md4_size = enc_to_utf16( (UTF16 *)keybuffer, PLAINTEXT_LENGTH, (UTF8 *) key, strlen((char*)key));
if (md4_size < 0)
md4_size = strlen16((UTF16 *)keybuffer);
/* Append the 0x80 padding terminator; value is pre-swapped on BE so the
 * later word swap() leaves it correct in memory. */
#if ARCH_LITTLE_ENDIAN
((UTF16*)keybuffer)[md4_size] = 0x80;
#else
((UTF16*)keybuffer)[md4_size] = 0x8000;
#endif
((UTF16*)keybuffer)[md4_size+1] = 0;
#if !ARCH_LITTLE_ENDIAN
((UTF16*)keybuffer)[md4_size+2] = 0;
#endif
/* Zero the words beyond the terminator that the previous key dirtied. */
i = md4_size>>1;
i += xBuf;
for(;i <= *last_length; i += xBuf)
keybuffer[i] = 0;
#if !ARCH_LITTLE_ENDIAN
/* Byte-swap the converted words into LE order expected by the MD4 code. */
swap(keybuffer, (md4_size>>1)+1);
#endif
*last_length = (md4_size >> 1) + 1;
/* Store the length tag (length in units, times 16 = length in bits). */
keybuffer[lenStoreOffset] = md4_size << 4;
}
/* Store a new plaintext candidate (legacy-codepage input path) at slot 'index'. */
static void set_key_encoding(char *_key, int index)
{
	unsigned int *slot = &ms_buffer1x[index * 16];

	set_key_helper_encoding(slot, 1, (unsigned char *)_key, 14, &last_i[index]);
	/* flag that a fresh password candidate is pending */
	new_key = 1;
}
// Get the key back from the key buffer, from UCS-2 LE
/* Reconstruct the plaintext for slot 'index' from the packed UCS-2 LE key
 * buffer and convert it back to the configured output encoding. */
static char *get_key(int index)
{
/* Static scratch: returned pointer comes from utf16_to_enc(), but the
 * unpacked UTF-16 string must live somewhere across the call. */
static union {
UTF16 u16[PLAINTEXT_LENGTH + 1];
unsigned int u32[(PLAINTEXT_LENGTH + 1 + 1) / 2];
} key;
unsigned int * keybuffer = &ms_buffer1x[index << 4];
unsigned int md4_size;
unsigned int i=0;
/* Word 14 holds the length tag written by set_key* (units << 4). */
int len = keybuffer[14] >> 4;
/* Unpack two UTF-16 units per 32-bit word (low half first on LE). */
for(md4_size = 0; md4_size < len; i++, md4_size += 2)
{
#if ARCH_LITTLE_ENDIAN
key.u16[md4_size] = keybuffer[i];
key.u16[md4_size+1] = keybuffer[i] >> 16;
#else
key.u16[md4_size] = keybuffer[i] >> 16;
key.u16[md4_size+1] = keybuffer[i];
#endif
}
#if !ARCH_LITTLE_ENDIAN
/* Undo the LE word order imposed on the buffer for the MD4 code. */
swap(key.u32, md4_size >> 1);
#endif
key.u16[len] = 0x00;
return (char *)utf16_to_enc(key.u16);
}
// Public domain hash function by DJ Bernstein (salt is a username)
/* DJB hash over the UTF-16 salt (the lowercased username); the salt is
 * terminated by a 0x80 unit rather than NUL. Public domain algorithm by
 * DJ Bernstein. */
static int salt_hash(void *salt)
{
	UTF16 *p = salt;
	unsigned int h = 5381;

	while (*p != 0x80)
		h = 33 * h ^ *p++;
	return h & (SALT_HASH_SIZE - 1);
}
/* John the Ripper format descriptor: wires the mscash (MS Cache / DCC)
 * parameters and method implementations into the framework. */
struct fmt_main fmt_mscash = {
{
/* format parameters */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8,
#if FMT_MAIN_VERSION > 11
/* tunable cost descriptions (none) */
{ NULL },
#endif
tests
}, {
/* method table */
init,
fmt_default_done,
fmt_default_reset,
prepare,
valid,
ms_split,
get_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
/* tunable cost value functions (none) */
{ NULL },
#endif
fmt_default_source,
{
binary_hash_0,
binary_hash_1,
binary_hash_2,
binary_hash_3,
binary_hash_4,
binary_hash_5,
binary_hash_6
},
salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
util.c | #include <mpi.h>
#include "precision.h"
#include "util.h"
/* Use the modified Gram-Schmidt process to compute (in place) the portion of
* the n-dimensional vector v orthogonal to each of the nv vectors s. The
* projection * of the vector onto each of the basis vectors is stored in the
* length-nv array c. This is actually a selective reorthogonalization scheme
* to attempt to correct rounding errors. */
/* Modified Gram-Schmidt with selective reorthogonalization.
 * v: the length-n (local portion of a distributed) vector, orthogonalized
 *    and normalized in place against the nv basis vectors stored
 *    contiguously in s.
 * c: on return, c[0..nv-1] hold the accumulated projections and c[nv] holds
 *    the norm of v before normalization. Returns n. */
int cmgs (cplx *v, cplx *c, cplx *s, long n, int nv) {
long i, j;
cplx *sv, cv;
real vnrm, lcrit;
/* Perform the first modified Gram Schmidt orthogonalization. */
for (i = 0, sv = s, lcrit = 0.; i < nv; ++i, sv += n) {
/* The projection of the vector onto the current basis. */
c[i] = pardot (sv, v, n);
/* Track the 1-norm of the projection column. */
lcrit += cabs(c[i]);
#pragma omp parallel for default(shared) private(j)
for (j = 0; j < n; ++j) v[j] -= c[i] * sv[j];
}
/* Compute the norm of the vector. */
c[nv] = parnorm (v, n);
vnrm = creal(c[nv]);
/* Reorthogonalize if necessary.
 * NOTE(review): if vnrm is exactly zero this divides by zero; with IEEE
 * arithmetic lcrit/vnrm becomes inf/nan, which (for inf) forces the
 * reorthogonalization pass -- presumably acceptable, but confirm. */
if (lcrit / vnrm > IMGS_L) {
for (i = 0, sv = s; i < nv; ++i, sv += n) {
/* Re-project the vector onto the current basis. */
cv = pardot (sv, v, n);
/* Update the projection. */
c[i] += cv;
/* Remove the remaining parallel component. */
#pragma omp parallel for default(shared) private(j)
for (j = 0; j < n; ++j) v[j] -= cv * sv[j];
}
/* Update the norm of the orthogonal vector. */
c[nv] = parnorm (v, n);
vnrm = creal(c[nv]);
}
/* Don't normalize if the norm is vanishing. */
if (vnrm < REAL_EPSILON) return n;
/* Finally, normalize the newly-created vector. */
#pragma omp parallel for default(shared) private(j)
for (j = 0; j < n; ++j) v[j] /= vnrm;
return n;
}
/* Compute the inner product of the distributed vectors x and y of dimension n. */
/* Inner product <x, y> of two distributed vectors of local length n.
 * The local sum is accumulated in double precision to limit rounding error,
 * then summed across all MPI ranks. */
cplx pardot (cplx *x, cplx *y, long n) {
	complex double acc = 0.0;
	long k;

#pragma omp parallel for default(shared) private(k) reduction(+: acc)
	for (k = 0; k < n; ++k) acc += conj(x[k]) * y[k];

	/* Combine per-rank partial sums; real and imaginary parts travel as
	 * two MPI_DOUBLEs. */
	MPI_Allreduce (MPI_IN_PLACE, &acc, 2, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
	return (cplx)acc;
}
/* Euclidean norm of a distributed vector of local length n.
 * Squared magnitudes accumulate in double precision, are summed across all
 * MPI ranks, and the square root of the global total is returned. */
real parnorm (cplx *x, long n) {
	double total = 0.0, re, im;
	long k;

#pragma omp parallel for default(shared) private(re,im,k) reduction(+: total)
	for (k = 0; k < n; ++k) {
		re = creal(x[k]);
		im = cimag(x[k]);
		total += re * re + im * im;
	}

	/* Sum the per-rank contributions. */
	MPI_Allreduce (MPI_IN_PLACE, &total, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
	return (real)sqrt(total);
}
/* The RMS error between a test vector and a reference. */
/* RMS error between a test vector and a reference, across all MPI ranks.
 * If nrm is nonzero the error is normalized by the reference norm. */
real mse (cplx *test, cplx *ref, long n, int nrm) {
	real acc[2] = {0.0, 0.0}, d;
	long k;

	/* Local sums of |test - ref|^2 and |ref|^2. */
	for (k = 0; k < n; ++k) {
		d = cabs(test[k] - ref[k]);
		acc[0] += d * d;
		d = cabs(ref[k]);
		acc[1] += d * d;
	}

	/* Global sums over all ranks. */
	MPI_Allreduce (MPI_IN_PLACE, acc, 2, MPIREAL, MPI_SUM, MPI_COMM_WORLD);

	/* Normalized MSE if requested, otherwise the plain difference norm. */
	return nrm ? sqrt(acc[0] / acc[1]) : sqrt(acc[0]);
}
/* Compute the sinc of the argument. */
/* Normalized sinc: sin(pi x)/(pi x), with sinc(0) = 1. */
real sinc (real x) {
	if (fabs(x) < REAL_EPSILON) return 1.0;
	return sin (M_PI * x) / (M_PI * x);
}
/* Compute the coordinates of the indexed far-field sample. */
/* Cartesian coordinates s[3] of far-field sample i on a grid of nt theta
 * rings by np phi samples; sample 0 is the south pole and the last theta
 * index is the north pole. Returns 0. */
int sampcoords (real *s, int i, int nt, int np) {
	int iphi, itheta;
	real st, theta, phi;

	/* Sample 0 is the south pole. */
	if (i == 0) {
		s[0] = s[1] = 0.0;
		s[2] = -1.0;
		return 0;
	}

	/* Unravel the linear index into phi and theta indices. */
	iphi = (i - 1) % np;
	itheta = (i - 1) / np + 1;

	/* The final theta ring collapses to the north pole. */
	if (itheta == nt - 1) {
		s[0] = s[1] = 0.0;
		s[2] = 1.0;
		return 0;
	}

	/* Angular position of this sample. */
	phi = 2 * M_PI * iphi / (real) np;
	theta = M_PI * (1. - itheta / (real) (nt - 1));

	/* Convert spherical angles to cartesian coordinates. */
	st = sin(theta);
	s[0] = cos(phi) * st;
	s[1] = sin(phi) * st;
	s[2] = cos(theta);
	return 0;
}
/* Compute the relative coordinates of a cell within a group of cells. */
/* Relative coordinates r[3] of cell l within a bpd^3 group of cells with
 * spacing dx, centered on the group. Returns 0. */
int cellcoords (real *r, int l, int bpd, real dx) {
	int idx[3], d;

	/* Unravel the linear cell index into 3-D grid indices. */
	GRID(idx, l, bpd, bpd);

	for (d = 0; d < 3; ++d)
		r[d] = 0.5 * dx * (2.0 * idx[d] + 1.0 - (real)bpd);
	return 0;
}
/* Determine if the element i is in the set s of length l. */
/* Return 1 if element i appears in the set s of length l, else 0. */
int inset (int i, int *s, int l) {
	int k;

	for (k = 0; k < l; ++k)
		if (s[k] == i) return 1;
	return 0;
}
/* Find the index of the maximum value in set of length n, ignoring indices in
* the set excl of length nex. */
/* Index of the element of set (length n) with the largest magnitude,
 * skipping indices listed in excl (length nex). Returns -1 if every index
 * is excluded or n is zero. */
int maxind (cplx *set, int n, int *excl, int nex) {
	real best = -1.0, mag;
	int ibest = -1, k;

	for (k = 0; k < n; ++k) {
		/* Ignore excluded indices. */
		if (inset(k, excl, nex)) continue;

		mag = cabs(set[k]);
		if (mag > best) {
			ibest = k;
			best = mag;
		}
	}
	return ibest;
}
/* Calculate the Legendre polynomial of order m and its derivative at a point t. */
/* Legendre polynomial P_m and its derivative at t, via the three-term
 * recurrence. On return *p = P_m(t) and *dp = P_m'(t); returns m.
 * NOTE: the derivative formula divides by (1 - t^2), so t must not be +-1
 * for m >= 2. */
static int legendre (real *p, real *dp, real t, int m) {
	real pkm1 = 1.0, pk = t;
	int k;

	/* Orders 0 and 1 are known in closed form. */
	if (m < 1) {
		*p = 1.0;
		*dp = 0.0;
		return m;
	}
	if (m < 2) {
		*p = t;
		*dp = 1.0;
		return m;
	}

	/* Recurrence: (k+1) P_{k+1} = (2k+1) t P_k - k P_{k-1}. */
	for (k = 1; k < m; ++k) {
		*p = ((2.0 * k + 1.0) * t * pk - k * pkm1) / (1.0 + k);
		pkm1 = pk;
		pk = *p;
	}

	/* Derivative from P_m and P_{m-1}. */
	*dp = m * (pkm1 - t * pk) / (1.0 - t * t);
	return m;
}
/* Compute Gaussian quadrature nodes and weights. */
/* Gauss-Legendre quadrature: fill nodes[0..m-1] and weights[0..m-1] for an
 * m-point rule on [-1, 1]. Roots of P_m are found by Newton iteration from
 * Chebyshev initial guesses; symmetry gives the negative roots. Returns 0. */
int gaussleg (real *nodes, real *weights, int m) {
int i, j, nroots = (m + 1) / 2;
real t, p, dp, dt;
const real tol = REAL_EPSILON;
const int maxit = 100;
for (i = 0; i < nroots; ++i) {
/* Use the Chebyshev roots as an initial guess. */
t = cos (M_PI * (i + 0.75) / (m + 0.5));
for (j = 0; j < maxit; ++j) {
/* Compute the value of the Legendre polynomial. */
legendre (&p, &dp, t, m);
/* Perform a Newton-Raphson update. */
dt = -p / dp; t += dt;
/* Break if convergence detected. */
if (fabs(dt) < tol) break;
}
/* Update the nodes and weights.
 * (dp still holds P_m'(t) from the last Newton step; for odd m the
 * middle root is written twice, harmlessly.) */
nodes[i] = t;
nodes[m - i - 1] = -t;
weights[i] = 2.0 / (1.0 - t * t) / (dp * dp);
weights[m - i - 1] = weights[i];
}
return 0;
}
|
smg3_setup_rap.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
#include "smg.h"
/*--------------------------------------------------------------------------
* Sets up new coarse grid operator stucture.
*--------------------------------------------------------------------------*/
/* Create (but do not compute) the coarse-grid operator RAP for 3-D SMG.
 * The stencil shape/size is chosen from A's stencil size and symmetry;
 * symmetric operators store only the lower triangular part + diagonal. */
hypre_StructMatrix *
hypre_SMG3CreateRAPOp( hypre_StructMatrix *R,
hypre_StructMatrix *A,
hypre_StructMatrix *PT,
hypre_StructGrid *coarse_grid )
{
hypre_StructMatrix *RAP;
hypre_Index *RAP_stencil_shape;
hypre_StructStencil *RAP_stencil;
HYPRE_Int RAP_stencil_size;
HYPRE_Int RAP_stencil_dim;
HYPRE_Int RAP_num_ghost[] = {1, 1, 1, 1, 1, 1};
hypre_StructStencil *A_stencil;
HYPRE_Int A_stencil_size;
HYPRE_Int k, j, i;
HYPRE_Int stencil_rank;
RAP_stencil_dim = 3;
A_stencil = hypre_StructMatrixStencil(A);
A_stencil_size = hypre_StructStencilSize(A_stencil);
/*-----------------------------------------------------------------------
 * Define RAP_stencil
 *-----------------------------------------------------------------------*/
stencil_rank = 0;
/*-----------------------------------------------------------------------
 * non-symmetric case
 *-----------------------------------------------------------------------*/
if (!hypre_StructMatrixSymmetric(A))
{
/*--------------------------------------------------------------------
 * 7 or 15 point fine grid stencil produces 15 point RAP
 *--------------------------------------------------------------------*/
if( A_stencil_size <= 15)
{
RAP_stencil_size = 15;
RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
for (k = -1; k < 2; k++)
{
for (j = -1; j < 2; j++)
{
for (i = -1; i < 2; i++)
{
/*--------------------------------------------------------
 * Storage for c,w,e,n,s elements in each plane
 *--------------------------------------------------------*/
if( i*j == 0 )
{
hypre_SetIndex3(RAP_stencil_shape[stencil_rank],i,j,k);
stencil_rank++;
}
}
}
}
}
/*--------------------------------------------------------------------
 * 19 or 27 point fine grid stencil produces 27 point RAP
 *--------------------------------------------------------------------*/
else
{
RAP_stencil_size = 27;
RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
for (k = -1; k < 2; k++)
{
for (j = -1; j < 2; j++)
{
for (i = -1; i < 2; i++)
{
/*--------------------------------------------------------
 * Storage for 9 elements (c,w,e,n,s,sw,se,nw,ne) in
 * each plane
 *--------------------------------------------------------*/
hypre_SetIndex3(RAP_stencil_shape[stencil_rank],i,j,k);
stencil_rank++;
}
}
}
}
}
/*-----------------------------------------------------------------------
 * symmetric case
 *-----------------------------------------------------------------------*/
else
{
/*--------------------------------------------------------------------
 * 7 or 15 point fine grid stencil produces 15 point RAP
 * Only store the lower triangular part + diagonal = 8 entries,
 * lower triangular means the lower triangular part on the matrix
 * in the standard lexicographic ordering.
 *--------------------------------------------------------------------*/
if( A_stencil_size <= 15)
{
RAP_stencil_size = 8;
RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
for (k = -1; k < 1; k++)
{
for (j = -1; j < 2; j++)
{
for (i = -1; i < 2; i++)
{
/*--------------------------------------------------------
 * Store 5 elements in lower plane (c,w,e,s,n)
 * and 3 elements in same plane (c,w,s)
 *--------------------------------------------------------*/
if( i*j == 0 && i+j+k <= 0)
{
hypre_SetIndex3(RAP_stencil_shape[stencil_rank],i,j,k);
stencil_rank++;
}
}
}
}
}
/*--------------------------------------------------------------------
 * 19 or 27 point fine grid stencil produces 27 point RAP
 * Only store the lower triangular part + diagonal = 14 entries,
 * lower triangular means the lower triangular part on the matrix
 * in the standard lexicographic ordering.
 *--------------------------------------------------------------------*/
else
{
RAP_stencil_size = 14;
RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
for (k = -1; k < 1; k++)
{
for (j = -1; j < 2; j++)
{
for (i = -1; i < 2; i++)
{
/*--------------------------------------------------------
 * Store 9 elements in lower plane (c,w,e,s,n,sw,se,nw,ne)
 * and 5 elements in same plane (c,w,s,sw,se)
 *--------------------------------------------------------*/
if( k < 0 || (i+j+k <=0 && j < 1) )
{
hypre_SetIndex3(RAP_stencil_shape[stencil_rank],i,j,k);
stencil_rank++;
}
}
}
}
}
}
RAP_stencil = hypre_StructStencilCreate(RAP_stencil_dim, RAP_stencil_size,
RAP_stencil_shape);
RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
coarse_grid, RAP_stencil);
hypre_StructStencilDestroy(RAP_stencil);
/*-----------------------------------------------------------------------
 * Coarse operator is symmetric iff fine operator is
 *-----------------------------------------------------------------------*/
hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A);
/*-----------------------------------------------------------------------
 * Set number of ghost points: symmetric storage needs no ghost layer on
 * the upper side in any direction (entries [1], [3], [5]).
 *-----------------------------------------------------------------------*/
if (hypre_StructMatrixSymmetric(A))
{
RAP_num_ghost[1] = 0;
RAP_num_ghost[3] = 0;
RAP_num_ghost[5] = 0;
}
hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost);
return RAP;
}
/*--------------------------------------------------------------------------
* Routines to build RAP. These routines are fairly general
* 1) No assumptions about symmetry of A
* 2) No assumption that R = transpose(P)
* 3) 7,15,19 or 27-point fine grid A
*
* I am, however, assuming that the c-to-c interpolation is the identity.
*
* I've written two routines - hypre_SMG3BuildRAPSym to build the lower
* triangular part of RAP (including the diagonal) and
* hypre_SMG3BuildRAPNoSym to build the upper triangular part of RAP
* (excluding the diagonal). So using symmetric storage, only the first
* routine would be called. With full storage both would need to be called.
*
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMG3BuildRAPSym( hypre_StructMatrix *A,
hypre_StructMatrix *PT,
hypre_StructMatrix *R,
hypre_StructMatrix *RAP,
hypre_Index cindex,
hypre_Index cstride )
{
hypre_Index index;
hypre_StructStencil *fine_stencil;
HYPRE_Int fine_stencil_size;
hypre_StructGrid *fgrid;
HYPRE_Int *fgrid_ids;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index loop_size;
HYPRE_Int fi, ci;
hypre_Box *A_dbox;
hypre_Box *PT_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
HYPRE_Real *pa, *pb;
HYPRE_Real *ra, *rb;
HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
HYPRE_Real *a_ac, *a_aw, *a_as;
HYPRE_Real *a_bc, *a_bw, *a_be, *a_bs, *a_bn;
HYPRE_Real *a_csw, *a_cse, *a_cnw, *a_cne;
HYPRE_Real *a_asw, *a_ase;
HYPRE_Real *a_bsw, *a_bse, *a_bnw, *a_bne;
HYPRE_Real *rap_cc, *rap_cw, *rap_cs;
HYPRE_Real *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn;
HYPRE_Real *rap_csw, *rap_cse;
HYPRE_Real *rap_bsw, *rap_bse, *rap_bnw, *rap_bne;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
fine_stencil = hypre_StructMatrixStencil(A);
fine_stencil_size = hypre_StructStencilSize(fine_stencil);
stridef = cstride;
hypre_SetIndex3(stridec, 1, 1, 1);
fgrid = hypre_StructMatrixGrid(A);
fgrid_ids = hypre_StructGridIDs(fgrid);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
PT_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(PT), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
* Extract pointers for interpolation operator:
* pa is pointer for weight for f-point above c-point
* pb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex3(index,0,0,1);
pa = hypre_StructMatrixExtractPointerByIndex(PT, fi, index);
hypre_SetIndex3(index,0,0,-1);
pb = hypre_StructMatrixExtractPointerByIndex(PT, fi, index);
/*-----------------------------------------------------------------
* Extract pointers for restriction operator:
* ra is pointer for weight for f-point above c-point
* rb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex3(index,0,0,1);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex3(index,0,0,-1);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
/*-----------------------------------------------------------------
* Extract pointers for 7-point fine grid operator:
*
* a_cc is pointer for center coefficient
* a_cw is pointer for west coefficient in same plane
* a_ce is pointer for east coefficient in same plane
* a_cs is pointer for south coefficient in same plane
* a_cn is pointer for north coefficient in same plane
* a_ac is pointer for center coefficient in plane above
* a_bc is pointer for center coefficient in plane below
*-----------------------------------------------------------------*/
hypre_SetIndex3(index,0,0,0);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,-1,0,0);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,0,0);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,0,-1,0);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,0,1,0);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,0,0,1);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,0,0,-1);
a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 15-point fine grid operator:
*
* a_aw is pointer for west coefficient in plane above
* a_ae is pointer for east coefficient in plane above
* a_as is pointer for south coefficient in plane above
* a_an is pointer for north coefficient in plane above
* a_bw is pointer for west coefficient in plane below
* a_be is pointer for east coefficient in plane below
* a_bs is pointer for south coefficient in plane below
* a_bn is pointer for north coefficient in plane below
*-----------------------------------------------------------------*/
if(fine_stencil_size > 7)
{
hypre_SetIndex3(index,-1,0,1);
a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,0,-1,1);
a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,-1,0,-1);
a_bw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,0,-1);
a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,0,-1,-1);
a_bs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,0,1,-1);
a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
}
/*-----------------------------------------------------------------
* Extract additional pointers for 19-point fine grid operator:
*
* a_csw is pointer for southwest coefficient in same plane
* a_cse is pointer for southeast coefficient in same plane
* a_cnw is pointer for northwest coefficient in same plane
* a_cne is pointer for northeast coefficient in same plane
*-----------------------------------------------------------------*/
if(fine_stencil_size > 15)
{
hypre_SetIndex3(index,-1,-1,0);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,-1,0);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,-1,1,0);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,1,0);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
}
/*-----------------------------------------------------------------
* Extract additional pointers for 27-point fine grid operator:
*
* a_asw is pointer for southwest coefficient in plane above
* a_ase is pointer for southeast coefficient in plane above
* a_anw is pointer for northwest coefficient in plane above
* a_ane is pointer for northeast coefficient in plane above
* a_bsw is pointer for southwest coefficient in plane below
* a_bse is pointer for southeast coefficient in plane below
* a_bnw is pointer for northwest coefficient in plane below
* a_bne is pointer for northeast coefficient in plane below
*-----------------------------------------------------------------*/
if(fine_stencil_size > 19)
{
hypre_SetIndex3(index,-1,-1,1);
a_asw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,-1,1);
a_ase = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,-1,-1,-1);
a_bsw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,-1,-1);
a_bse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,-1,1,-1);
a_bnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,1,-1);
a_bne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
}
/*-----------------------------------------------------------------
* Extract pointers for 15-point coarse grid operator:
*
* We build only the lower triangular part (plus diagonal).
*
* rap_cc is pointer for center coefficient (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex3(index,0,0,0);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,0,0);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,-1,0);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,0,-1);
rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,0,-1);
rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,0,-1);
rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,-1,-1);
rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,1,-1);
rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 27-point coarse grid operator:
*
* A 27-point coarse grid operator is produced when the fine grid
* stencil is 19 or 27 point.
*
* We build only the lower triangular part.
*
* rap_csw is pointer for southwest coefficient in same plane (etc.)
*-----------------------------------------------------------------*/
if(fine_stencil_size > 15)
{
hypre_SetIndex3(index,-1,-1,0);
rap_csw =
hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,-1,0);
rap_cse =
hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,-1,-1);
rap_bsw =
hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,-1,-1);
rap_bse =
hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,1,-1);
rap_bnw =
hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,1,-1);
rap_bne =
hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
}
/*-----------------------------------------------------------------
* Define offsets for fine grid stencil and interpolation
*
* In the BoxLoop below I assume iA and iP refer to data associated
* with the point which we are building the stencil for. The below
* Offsets are used in refering to data associated with other points.
*-----------------------------------------------------------------*/
hypre_SetIndex3(index,0,0,1);
zOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
zOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);
hypre_SetIndex3(index,0,1,0);
yOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);
hypre_SetIndex3(index,1,0,0);
xOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);
/*--------------------------------------------------------------------
* Switch statement to direct control to apropriate BoxLoop depending
* on stencil size. Default is full 27-point.
*-----------------------------------------------------------------*/
switch (fine_stencil_size)
{
/*--------------------------------------------------------------
* Loop for symmetric 7-point fine grid operator; produces a
* symmetric 15-point coarse grid operator. We calculate only the
* lower triangular stencil entries: (below-south, below-west,
* below-center, below-east, below-north, center-south,
* center-west, and center-center).
*--------------------------------------------------------------*/
case 7:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
PT_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP - zOffsetP - yOffsetP;
rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1];
iP1 = iP - zOffsetP - xOffsetP;
rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1];
iP1 = iP - zOffsetP;
rap_bc[iAc] = a_bc[iA] * pa[iP1]
+ rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_bc[iAm1];
iP1 = iP - zOffsetP + xOffsetP;
rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP;
rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = a_cs[iA]
+ rb[iR] * a_cs[iAm1] * pb[iP1]
+ ra[iR] * a_cs[iAp1] * pa[iP1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_ac[iAm1]
+ ra[iR] * a_bc[iAp1]
+ a_bc[iA] * pb[iP]
+ a_ac[iA] * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
/*--------------------------------------------------------------
* Loop for symmetric 15-point fine grid operator; produces a
* symmetric 15-point coarse grid operator. We calculate only the
* lower triangular stencil entries: (below-south, below-west,
* below-center, below-east, below-north, center-south,
* center-west, and center-center).
*--------------------------------------------------------------*/
case 15:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
PT_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP - zOffsetP - yOffsetP;
rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1]
+ rb[iR] * a_bs[iAm1]
+ a_bs[iA] * pa[iP1];
iP1 = iP - zOffsetP - xOffsetP;
rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
+ rb[iR] * a_bw[iAm1]
+ a_bw[iA] * pa[iP1];
iP1 = iP - zOffsetP;
rap_bc[iAc] = a_bc[iA] * pa[iP1]
+ rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_bc[iAm1];
iP1 = iP - zOffsetP + xOffsetP;
rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
+ rb[iR] * a_be[iAm1]
+ a_be[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP;
rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1]
+ rb[iR] * a_bn[iAm1]
+ a_bn[iA] * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = a_cs[iA]
+ rb[iR] * a_cs[iAm1] * pb[iP1]
+ ra[iR] * a_cs[iAp1] * pa[iP1]
+ a_bs[iA] * pb[iP1]
+ a_as[iA] * pa[iP1]
+ rb[iR] * a_as[iAm1]
+ ra[iR] * a_bs[iAp1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1]
+ a_bw[iA] * pb[iP1]
+ a_aw[iA] * pa[iP1]
+ rb[iR] * a_aw[iAm1]
+ ra[iR] * a_bw[iAp1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_ac[iAm1]
+ ra[iR] * a_bc[iAp1]
+ a_bc[iA] * pb[iP]
+ a_ac[iA] * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
/*--------------------------------------------------------------
* Loop for symmetric 19-point fine grid operator; produces a
* symmetric 27-point coarse grid operator. We calculate only the
* lower triangular stencil entries: (below-southwest, below-south,
* below-southeast, below-west, below-center, below-east,
* below-northwest, below-north, below-northeast, center-southwest,
* center-south, center-southeast, center-west, and center-center).
*--------------------------------------------------------------*/
case 19:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
PT_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP - zOffsetP - yOffsetP - xOffsetP;
rap_bsw[iAc] = rb[iR] * a_csw[iAm1] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP;
rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1]
+ rb[iR] * a_bs[iAm1]
+ a_bs[iA] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP + xOffsetP;
rap_bse[iAc] = rb[iR] * a_cse[iAm1] * pa[iP1];
iP1 = iP - zOffsetP - xOffsetP;
rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
+ rb[iR] * a_bw[iAm1]
+ a_bw[iA] * pa[iP1];
iP1 = iP - zOffsetP;
rap_bc[iAc] = a_bc[iA] * pa[iP1]
+ rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_bc[iAm1];
iP1 = iP - zOffsetP + xOffsetP;
rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
+ rb[iR] * a_be[iAm1]
+ a_be[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP - xOffsetP;
rap_bnw[iAc] = rb[iR] * a_cnw[iAm1] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP;
rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1]
+ rb[iR] * a_bn[iAm1]
+ a_bn[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP + xOffsetP;
rap_bne[iAc] = rb[iR] * a_cne[iAm1] * pa[iP1];
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = a_csw[iA]
+ rb[iR] * a_csw[iAm1] * pb[iP1]
+ ra[iR] * a_csw[iAp1] * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = a_cs[iA]
+ rb[iR] * a_cs[iAm1] * pb[iP1]
+ ra[iR] * a_cs[iAp1] * pa[iP1]
+ a_bs[iA] * pb[iP1]
+ a_as[iA] * pa[iP1]
+ rb[iR] * a_as[iAm1]
+ ra[iR] * a_bs[iAp1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = a_cse[iA]
+ rb[iR] * a_cse[iAm1] * pb[iP1]
+ ra[iR] * a_cse[iAp1] * pa[iP1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1]
+ a_bw[iA] * pb[iP1]
+ a_aw[iA] * pa[iP1]
+ rb[iR] * a_aw[iAm1]
+ ra[iR] * a_bw[iAp1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_ac[iAm1]
+ ra[iR] * a_bc[iAp1]
+ a_bc[iA] * pb[iP]
+ a_ac[iA] * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
/*--------------------------------------------------------------
* Loop for symmetric 27-point fine grid operator; produces a
* symmetric 27-point coarse grid operator. We calculate only the
* lower triangular stencil entries: (below-southwest, below-south,
* below-southeast, below-west, below-center, below-east,
* below-northwest, below-north, below-northeast, center-southwest,
* center-south, center-southeast, center-west, and center-center).
*--------------------------------------------------------------*/
default:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
PT_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP - zOffsetP - yOffsetP - xOffsetP;
rap_bsw[iAc] = rb[iR] * a_csw[iAm1] * pa[iP1]
+ rb[iR] * a_bsw[iAm1]
+ a_bsw[iA] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP;
rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1]
+ rb[iR] * a_bs[iAm1]
+ a_bs[iA] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP + xOffsetP;
rap_bse[iAc] = rb[iR] * a_cse[iAm1] * pa[iP1]
+ rb[iR] * a_bse[iAm1]
+ a_bse[iA] * pa[iP1];
iP1 = iP - zOffsetP - xOffsetP;
rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
+ rb[iR] * a_bw[iAm1]
+ a_bw[iA] * pa[iP1];
iP1 = iP - zOffsetP;
rap_bc[iAc] = a_bc[iA] * pa[iP1]
+ rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_bc[iAm1];
iP1 = iP - zOffsetP + xOffsetP;
rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
+ rb[iR] * a_be[iAm1]
+ a_be[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP - xOffsetP;
rap_bnw[iAc] = rb[iR] * a_cnw[iAm1] * pa[iP1]
+ rb[iR] * a_bnw[iAm1]
+ a_bnw[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP;
rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1]
+ rb[iR] * a_bn[iAm1]
+ a_bn[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP + xOffsetP;
rap_bne[iAc] = rb[iR] * a_cne[iAm1] * pa[iP1]
+ rb[iR] * a_bne[iAm1]
+ a_bne[iA] * pa[iP1];
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = a_csw[iA]
+ rb[iR] * a_csw[iAm1] * pb[iP1]
+ ra[iR] * a_csw[iAp1] * pa[iP1]
+ a_bsw[iA] * pb[iP1]
+ a_asw[iA] * pa[iP1]
+ rb[iR] * a_asw[iAm1]
+ ra[iR] * a_bsw[iAp1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = a_cs[iA]
+ rb[iR] * a_cs[iAm1] * pb[iP1]
+ ra[iR] * a_cs[iAp1] * pa[iP1]
+ a_bs[iA] * pb[iP1]
+ a_as[iA] * pa[iP1]
+ rb[iR] * a_as[iAm1]
+ ra[iR] * a_bs[iAp1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = a_cse[iA]
+ rb[iR] * a_cse[iAm1] * pb[iP1]
+ ra[iR] * a_cse[iAp1] * pa[iP1]
+ a_bse[iA] * pb[iP1]
+ a_ase[iA] * pa[iP1]
+ rb[iR] * a_ase[iAm1]
+ ra[iR] * a_bse[iAp1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1]
+ a_bw[iA] * pb[iP1]
+ a_aw[iA] * pa[iP1]
+ rb[iR] * a_aw[iAm1]
+ ra[iR] * a_bw[iAp1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_ac[iAm1]
+ ra[iR] * a_bc[iAp1]
+ a_bc[iA] * pb[iP]
+ a_ac[iA] * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
} /* end switch statement */
} /* end ForBoxI */
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_SMG3BuildRAPNoSym
 *
 * Computes the non-symmetric part of the coarse-grid operator RAP for the
 * 3D SMG method: the upper-triangular stencil entries (rap_ce, rap_cn,
 * rap_ac, rap_aw, rap_ae, rap_as, rap_an, and for larger stencils also the
 * corner entries) are assembled from the fine-grid operator A, the
 * interpolation operator PT, and the restriction operator R.
 *
 * The fine-grid stencil size (7, 15, 19, or 27 points) selects which
 * BoxLoop is executed; a 7/15-point fine operator produces a 15-point
 * coarse operator, while 19/27-point fine operators produce a 27-point
 * coarse operator (default case).
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMG3BuildRAPNoSym( hypre_StructMatrix *A,
hypre_StructMatrix *PT,
hypre_StructMatrix *R,
hypre_StructMatrix *RAP,
hypre_Index cindex,
hypre_Index cstride )
{
hypre_Index index;
hypre_StructStencil *fine_stencil;
HYPRE_Int fine_stencil_size;
hypre_StructGrid *fgrid;
HYPRE_Int *fgrid_ids;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index loop_size;
HYPRE_Int fi, ci;
hypre_Box *A_dbox;
hypre_Box *PT_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
/* interpolation (pa/pb) and restriction (ra/rb) weight arrays */
HYPRE_Real *pa, *pb;
HYPRE_Real *ra, *rb;
/* fine-grid operator coefficient arrays; naming: a_{plane}{direction},
   plane = c (center), a (above), b (below) */
HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
HYPRE_Real *a_ac, *a_aw, *a_ae, *a_as, *a_an;
HYPRE_Real *a_be, *a_bn;
HYPRE_Real *a_csw, *a_cse, *a_cnw, *a_cne;
HYPRE_Real *a_asw, *a_ase, *a_anw, *a_ane;
HYPRE_Real *a_bnw, *a_bne;
/* coarse-grid operator entries (upper triangular part only) */
HYPRE_Real *rap_ce, *rap_cn;
HYPRE_Real *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an;
HYPRE_Real *rap_cnw, *rap_cne;
HYPRE_Real *rap_asw, *rap_ase, *rap_anw, *rap_ane;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
fine_stencil = hypre_StructMatrixStencil(A);
fine_stencil_size = hypre_StructStencilSize(fine_stencil);
/* fine-grid boxes are traversed with the coarsening stride,
   coarse-grid boxes with unit stride */
stridef = cstride;
hypre_SetIndex3(stridec, 1, 1, 1);
fgrid = hypre_StructMatrixGrid(A);
fgrid_ids = hypre_StructGridIDs(fgrid);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
/* fine and coarse boxes are matched by grid box id, not array index */
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
PT_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(PT), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex3(index,0,0,1);
pa = hypre_StructMatrixExtractPointerByIndex(PT, fi, index);
hypre_SetIndex3(index,0,0,-1);
pb = hypre_StructMatrixExtractPointerByIndex(PT, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex3(index,0,0,1);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex3(index,0,0,-1);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for 7-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient in same plane
 * a_ce is pointer for east coefficient in same plane
 * a_cs is pointer for south coefficient in same plane
 * a_cn is pointer for north coefficient in same plane
 * a_ac is pointer for center coefficient in plane above
 * a_bc is pointer for center coefficient in plane below
 *-----------------------------------------------------------------*/
hypre_SetIndex3(index,0,0,0);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,-1,0,0);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,0,0);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,0,-1,0);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,0,1,0);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,0,0,1);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 15-point fine grid operator:
 *
 * a_aw is pointer for west coefficient in plane above
 * a_ae is pointer for east coefficient in plane above
 * a_as is pointer for south coefficient in plane above
 * a_an is pointer for north coefficient in plane above
 * a_bw is pointer for west coefficient in plane below
 * a_be is pointer for east coefficient in plane below
 * a_bs is pointer for south coefficient in plane below
 * a_bn is pointer for north coefficient in plane below
 *-----------------------------------------------------------------*/
if(fine_stencil_size > 7)
{
hypre_SetIndex3(index,-1,0,1);
a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,0,1);
a_ae = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,0,-1,1);
a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,0,1,1);
a_an = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,0,-1);
a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,0,1,-1);
a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
}
/*-----------------------------------------------------------------
 * Extract additional pointers for 19-point fine grid operator:
 *
 * a_csw is pointer for southwest coefficient in same plane
 * a_cse is pointer for southeast coefficient in same plane
 * a_cnw is pointer for northwest coefficient in same plane
 * a_cne is pointer for northeast coefficient in same plane
 *-----------------------------------------------------------------*/
if(fine_stencil_size > 15)
{
hypre_SetIndex3(index,-1,-1,0);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,-1,0);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,-1,1,0);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,1,0);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
}
/*-----------------------------------------------------------------
 * Extract additional pointers for 27-point fine grid operator:
 *
 * a_asw is pointer for southwest coefficient in plane above
 * a_ase is pointer for southeast coefficient in plane above
 * a_anw is pointer for northwest coefficient in plane above
 * a_ane is pointer for northeast coefficient in plane above
 * a_bsw is pointer for southwest coefficient in plane below
 * a_bse is pointer for southeast coefficient in plane below
 * a_bnw is pointer for northwest coefficient in plane below
 * a_bne is pointer for northeast coefficient in plane below
 *-----------------------------------------------------------------*/
if(fine_stencil_size > 19)
{
hypre_SetIndex3(index,-1,-1,1);
a_asw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,-1,1);
a_ase = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,-1,1,1);
a_anw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,1,1);
a_ane = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,-1,1,-1);
a_bnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index,1,1,-1);
a_bne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
}
/*-----------------------------------------------------------------
 * Extract pointers for 15-point coarse grid operator:
 *
 * We build only the upper triangular part (excluding diagonal).
 *
 * rap_ce is pointer for east coefficient in same plane (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex3(index,1,0,0);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,1,0);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,0,1);
rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,0,1);
rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,0,1);
rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,-1,1);
rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,1,1);
rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 27-point coarse grid operator:
 *
 * A 27-point coarse grid operator is produced when the fine grid
 * stencil is 19 or 27 point.
 *
 * We build only the upper triangular part.
 *
 * rap_cnw is pointer for northwest coefficient in same plane (etc.)
 *-----------------------------------------------------------------*/
if(fine_stencil_size > 15)
{
hypre_SetIndex3(index,-1,1,0);
rap_cnw =
hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,1,0);
rap_cne =
hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,-1,1);
rap_asw =
hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,-1,1);
rap_ase =
hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,1,1);
rap_anw =
hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,1,1);
rap_ane =
hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
}
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the BoxLoop below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in referring to data associated with other points.
 *-----------------------------------------------------------------*/
hypre_SetIndex3(index,0,0,1);
zOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
zOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);
hypre_SetIndex3(index,0,1,0);
yOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);
hypre_SetIndex3(index,1,0,0);
xOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);
/*-----------------------------------------------------------------
 * Switch statement to direct control to appropriate BoxLoop depending
 * on stencil size. Default is full 27-point.
 *-----------------------------------------------------------------*/
switch (fine_stencil_size)
{
/*--------------------------------------------------------------
 * Loop for 7-point fine grid operator; produces upper triangular
 * part of 15-point coarse grid operator. stencil entries:
 * (above-north, above-east, above-center, above-west,
 * above-south, center-north, and center-east).
 *--------------------------------------------------------------*/
case 7:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
PT_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
/* iAm1/iAp1 index the fine-grid planes below/above the current point */
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac[iA] * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac[iAp1];
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn[iA]
+ rb[iR] * a_cn[iAm1] * pb[iP1]
+ ra[iR] * a_cn[iAp1] * pa[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
/*--------------------------------------------------------------
 * Loop for 15-point fine grid operator; produces upper triangular
 * part of 15-point coarse grid operator. stencil entries:
 * (above-north, above-east, above-center, above-west,
 * above-south, center-north, and center-east).
 *--------------------------------------------------------------*/
case 15:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
PT_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1]
+ ra[iR] * a_an[iAp1]
+ a_an[iA] * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
+ ra[iR] * a_ae[iAp1]
+ a_ae[iA] * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac[iA] * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac[iAp1];
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
+ ra[iR] * a_aw[iAp1]
+ a_aw[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1]
+ ra[iR] * a_as[iAp1]
+ a_as[iA] * pb[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn[iA]
+ rb[iR] * a_cn[iAm1] * pb[iP1]
+ ra[iR] * a_cn[iAp1] * pa[iP1]
+ a_bn[iA] * pb[iP1]
+ a_an[iA] * pa[iP1]
+ rb[iR] * a_an[iAm1]
+ ra[iR] * a_bn[iAp1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1]
+ a_be[iA] * pb[iP1]
+ a_ae[iA] * pa[iP1]
+ rb[iR] * a_ae[iAm1]
+ ra[iR] * a_be[iAp1];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
/*--------------------------------------------------------------
 * Loop for 19-point fine grid operator; produces upper triangular
 * part of 27-point coarse grid operator. stencil entries:
 * (above-northeast, above-north, above-northwest, above-east,
 * above-center, above-west, above-southeast, above-south,
 * above-southwest, center-northeast, center-north,
 * center-northwest, and center-east).
 *--------------------------------------------------------------*/
case 19:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
PT_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
rap_ane[iAc] = ra[iR] * a_cne[iAp1] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1]
+ ra[iR] * a_an[iAp1]
+ a_an[iA] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
rap_anw[iAc] = ra[iR] * a_cnw[iAp1] * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
+ ra[iR] * a_ae[iAp1]
+ a_ae[iA] * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac[iA] * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac[iAp1];
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
+ ra[iR] * a_aw[iAp1]
+ a_aw[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
rap_ase[iAc] = ra[iR] * a_cse[iAp1] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1]
+ ra[iR] * a_as[iAp1]
+ a_as[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
rap_asw[iAc] = ra[iR] * a_csw[iAp1] * pb[iP1];
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = a_cne[iA]
+ rb[iR] * a_cne[iAm1] * pb[iP1]
+ ra[iR] * a_cne[iAp1] * pa[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn[iA]
+ rb[iR] * a_cn[iAm1] * pb[iP1]
+ ra[iR] * a_cn[iAp1] * pa[iP1]
+ a_bn[iA] * pb[iP1]
+ a_an[iA] * pa[iP1]
+ rb[iR] * a_an[iAm1]
+ ra[iR] * a_bn[iAp1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = a_cnw[iA]
+ rb[iR] * a_cnw[iAm1] * pb[iP1]
+ ra[iR] * a_cnw[iAp1] * pa[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1]
+ a_be[iA] * pb[iP1]
+ a_ae[iA] * pa[iP1]
+ rb[iR] * a_ae[iAm1]
+ ra[iR] * a_be[iAp1];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
/*--------------------------------------------------------------
 * Loop for 27-point fine grid operator; produces upper triangular
 * part of 27-point coarse grid operator. stencil entries:
 * (above-northeast, above-north, above-northwest, above-east,
 * above-center, above-west, above-southeast, above-south,
 * above-southwest, center-northeast, center-north,
 * center-northwest, and center-east).
 *--------------------------------------------------------------*/
default:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
PT_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
rap_ane[iAc] = ra[iR] * a_cne[iAp1] * pb[iP1]
+ ra[iR] * a_ane[iAp1]
+ a_ane[iA] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1]
+ ra[iR] * a_an[iAp1]
+ a_an[iA] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
rap_anw[iAc] = ra[iR] * a_cnw[iAp1] * pb[iP1]
+ ra[iR] * a_anw[iAp1]
+ a_anw[iA] * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
+ ra[iR] * a_ae[iAp1]
+ a_ae[iA] * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac[iA] * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac[iAp1];
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
+ ra[iR] * a_aw[iAp1]
+ a_aw[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
rap_ase[iAc] = ra[iR] * a_cse[iAp1] * pb[iP1]
+ ra[iR] * a_ase[iAp1]
+ a_ase[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1]
+ ra[iR] * a_as[iAp1]
+ a_as[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
rap_asw[iAc] = ra[iR] * a_csw[iAp1] * pb[iP1]
+ ra[iR] * a_asw[iAp1]
+ a_asw[iA] * pb[iP1];
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = a_cne[iA]
+ rb[iR] * a_cne[iAm1] * pb[iP1]
+ ra[iR] * a_cne[iAp1] * pa[iP1]
+ a_bne[iA] * pb[iP1]
+ a_ane[iA] * pa[iP1]
+ rb[iR] * a_ane[iAm1]
+ ra[iR] * a_bne[iAp1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn[iA]
+ rb[iR] * a_cn[iAm1] * pb[iP1]
+ ra[iR] * a_cn[iAp1] * pa[iP1]
+ a_bn[iA] * pb[iP1]
+ a_an[iA] * pa[iP1]
+ rb[iR] * a_an[iAm1]
+ ra[iR] * a_bn[iAp1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = a_cnw[iA]
+ rb[iR] * a_cnw[iAm1] * pb[iP1]
+ ra[iR] * a_cnw[iAp1] * pa[iP1]
+ a_bnw[iA] * pb[iP1]
+ a_anw[iA] * pa[iP1]
+ rb[iR] * a_anw[iAm1]
+ ra[iR] * a_bnw[iAp1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1]
+ a_be[iA] * pb[iP1]
+ a_ae[iA] * pa[iP1]
+ rb[iR] * a_ae[iAm1]
+ ra[iR] * a_be[iAp1];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
} /* end switch statement */
} /* end ForBoxI */
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Collapses stencil in periodic direction on coarsest grid.
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_SMG3RAPPeriodicSym
 *
 * Collapses the (symmetric, lower-triangular) coarse-grid operator RAP in
 * the periodic z-direction on the coarsest grid: when the z-period is 1,
 * each below-plane stencil entry is folded into the corresponding
 * center-plane entry and then zeroed out, so the operator becomes planar.
 * A no-op when the z-direction period is not 1.
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMG3RAPPeriodicSym( hypre_StructMatrix *RAP,
hypre_Index cindex,
hypre_Index cstride )
{
hypre_Index index;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index loop_size;
HYPRE_Int ci;
hypre_Box *RAP_dbox;
/* below-plane (b) and center-plane (c) stencil entry arrays */
HYPRE_Real *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn;
HYPRE_Real *rap_cc, *rap_cw, *rap_cs;
HYPRE_Real *rap_bsw, *rap_bse, *rap_bnw, *rap_bne;
HYPRE_Real *rap_csw, *rap_cse;
HYPRE_Int iAc;
/* shifted data indices: m = minus, p = plus, x/y = direction */
HYPRE_Int iAcmx;
HYPRE_Int iAcmy;
HYPRE_Int iAcmxmy;
HYPRE_Int iAcpxmy;
HYPRE_Int xOffset;
HYPRE_Int yOffset;
HYPRE_Real zero = 0.0;
hypre_StructStencil *stencil;
HYPRE_Int stencil_size;
stencil = hypre_StructMatrixStencil(RAP);
stencil_size = hypre_StructStencilSize(stencil);
hypre_SetIndex3(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
/* Only collapse when the grid is periodic in z with period 1 */
if (hypre_IndexZ(hypre_StructGridPeriodic(cgrid)) == 1)
{
/* NOTE(review): assemble presumably refreshes ghost-zone data so the
 * neighbor values read through iAcmx/iAcmy below are current — confirm */
hypre_StructMatrixAssemble(RAP);
hypre_ForBoxI(ci, cgrid_boxes)
{
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
hypre_SetIndex3(index,1,0,0);
xOffset = hypre_BoxOffsetDistance(RAP_dbox,index);
hypre_SetIndex3(index,0,1,0);
yOffset = hypre_BoxOffsetDistance(RAP_dbox,index);
/*-----------------------------------------------------------------
 * Extract pointers for 15-point coarse grid operator:
 *-----------------------------------------------------------------*/
hypre_SetIndex3(index,0,0,-1);
rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,0,-1);
rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,0,-1);
rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,-1,-1);
rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,1,-1);
rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,0,0);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,0,0);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,-1,0);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 27-point coarse grid operator:
 *-----------------------------------------------------------------*/
if(stencil_size == 27)
{
hypre_SetIndex3(index,-1,-1,-1);
rap_bsw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,-1,-1);
rap_bse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,1,-1);
rap_bnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,1,-1);
rap_bne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,-1,0);
rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,-1,0);
rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
}
/*-----------------------------------------------------------------
 * Collapse 15 point operator.
 *-----------------------------------------------------------------*/
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop1Begin(hypre_StructMatrixNDim(RAP), loop_size,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc,iAcmx,iAcmy) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(iAc)
{
iAcmx = iAc - xOffset;
iAcmy = iAc - yOffset;
/* NOTE(review): the 2.0 factor presumably accounts for the above-center
 * entry that is not stored in the symmetric (lower-triangular) stencil;
 * off-center entries pair each below entry with the reflected below
 * entry of the shifted neighbor — confirm against the Sym build code */
rap_cc[iAc] += (2.0 * rap_bc[iAc]);
rap_cw[iAc] += (rap_bw[iAc] + rap_be[iAcmx]);
rap_cs[iAc] += (rap_bs[iAc] + rap_bn[iAcmy]);
}
hypre_BoxLoop1End(iAc);
/* Second pass zeroes the collapsed below-plane entries; done separately
 * so the first loop never reads a value another iteration has zeroed */
hypre_BoxLoop1Begin(hypre_StructMatrixNDim(RAP), loop_size,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(iAc)
{
rap_bc[iAc] = zero;
rap_bw[iAc] = zero;
rap_be[iAc] = zero;
rap_bs[iAc] = zero;
rap_bn[iAc] = zero;
}
hypre_BoxLoop1End(iAc);
/*-----------------------------------------------------------------
 * Collapse additional entries for 27 point operator.
 *-----------------------------------------------------------------*/
if (stencil_size == 27)
{
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop1Begin(hypre_StructMatrixNDim(RAP), loop_size,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc,iAcmxmy,iAcpxmy) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(iAc)
{
iAcmxmy = iAc - xOffset - yOffset;
iAcpxmy = iAc + xOffset - yOffset;
rap_csw[iAc] += (rap_bsw[iAc] + rap_bne[iAcmxmy]);
rap_cse[iAc] += (rap_bse[iAc] + rap_bnw[iAcpxmy]);
}
hypre_BoxLoop1End(iAc);
hypre_BoxLoop1Begin(hypre_StructMatrixNDim(RAP), loop_size,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(iAc)
{
rap_bsw[iAc] = zero;
rap_bse[iAc] = zero;
rap_bnw[iAc] = zero;
rap_bne[iAc] = zero;
}
hypre_BoxLoop1End(iAc);
}
} /* end ForBoxI */
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Collapses stencil in periodic direction on coarsest grid.
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_SMG3RAPPeriodicNoSym
 *
 * Collapses the (non-symmetric, fully stored) coarse-grid operator RAP in
 * the periodic z-direction on the coarsest grid: when the z-period is 1,
 * each below-plane (b) and above-plane (a) stencil entry is added into the
 * corresponding center-plane (c) entry and then zeroed, leaving a planar
 * operator. A no-op when the z-direction period is not 1.
 *
 * Unlike the symmetric variant, both planes are stored here, so the
 * collapse is purely local (no shifted-neighbor reads) and the add and the
 * zeroing can be done in a single loop.
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SMG3RAPPeriodicNoSym( hypre_StructMatrix *RAP,
hypre_Index cindex,
hypre_Index cstride )
{
hypre_Index index;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index loop_size;
HYPRE_Int ci;
hypre_Box *RAP_dbox;
/* below (b), center (c), and above (a) plane stencil entry arrays */
HYPRE_Real *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn;
HYPRE_Real *rap_cc, *rap_cw, *rap_ce, *rap_cs, *rap_cn;
HYPRE_Real *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an;
HYPRE_Real *rap_bsw, *rap_bse, *rap_bnw, *rap_bne;
HYPRE_Real *rap_csw, *rap_cse, *rap_cnw, *rap_cne;
HYPRE_Real *rap_asw, *rap_ase, *rap_anw, *rap_ane;
HYPRE_Int iAc;
HYPRE_Real zero = 0.0;
hypre_StructStencil *stencil;
HYPRE_Int stencil_size;
stencil = hypre_StructMatrixStencil(RAP);
stencil_size = hypre_StructStencilSize(stencil);
hypre_SetIndex3(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
/* Only collapse when the grid is periodic in z with period 1 */
if (hypre_IndexZ(hypre_StructGridPeriodic(cgrid)) == 1)
{
hypre_ForBoxI(ci, cgrid_boxes)
{
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for 15-point coarse grid operator:
 *-----------------------------------------------------------------*/
hypre_SetIndex3(index,0,0,-1);
rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,0,-1);
rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,0,-1);
rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,-1,-1);
rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,1,-1);
rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,0,0);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,0,0);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,0,0);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,-1,0);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,1,0);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,0,1);
rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,0,1);
rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,0,1);
rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,-1,1);
rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,0,1,1);
rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 27-point coarse grid operator:
 *-----------------------------------------------------------------*/
if(stencil_size == 27)
{
hypre_SetIndex3(index,-1,-1,-1);
rap_bsw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,-1,-1);
rap_bse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,1,-1);
rap_bnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,1,-1);
rap_bne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,-1,0);
rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,-1,0);
rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,1,0);
rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,1,0);
rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,-1,1);
rap_asw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,-1,1);
rap_ase = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,-1,1,1);
rap_anw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index,1,1,1);
rap_ane = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
}
/*-----------------------------------------------------------------
 * Collapse 15 point operator.
 *-----------------------------------------------------------------*/
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop1Begin(hypre_StructMatrixNDim(RAP), loop_size,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(iAc)
{
/* fold below+above entries into the center plane, then zero them */
rap_cc[iAc] += (rap_bc[iAc] + rap_ac[iAc]);
rap_bc[iAc] = zero;
rap_ac[iAc] = zero;
rap_cw[iAc] += (rap_bw[iAc] + rap_aw[iAc]);
rap_bw[iAc] = zero;
rap_aw[iAc] = zero;
rap_ce[iAc] += (rap_be[iAc] + rap_ae[iAc]);
rap_be[iAc] = zero;
rap_ae[iAc] = zero;
rap_cs[iAc] += (rap_bs[iAc] + rap_as[iAc]);
rap_bs[iAc] = zero;
rap_as[iAc] = zero;
rap_cn[iAc] += (rap_bn[iAc] + rap_an[iAc]);
rap_bn[iAc] = zero;
rap_an[iAc] = zero;
}
hypre_BoxLoop1End(iAc);
/*-----------------------------------------------------------------
 * Collapse additional entries for 27 point operator.
 *-----------------------------------------------------------------*/
if (stencil_size == 27)
{
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop1Begin(hypre_StructMatrixNDim(RAP), loop_size,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(iAc)
{
rap_csw[iAc] += (rap_bsw[iAc] + rap_asw[iAc]);
rap_bsw[iAc] = zero;
rap_asw[iAc] = zero;
rap_cse[iAc] += (rap_bse[iAc] + rap_ase[iAc]);
rap_bse[iAc] = zero;
rap_ase[iAc] = zero;
rap_cnw[iAc] += (rap_bnw[iAc] + rap_anw[iAc]);
rap_bnw[iAc] = zero;
rap_anw[iAc] = zero;
rap_cne[iAc] += (rap_bne[iAc] + rap_ane[iAc]);
rap_bne[iAc] = zero;
rap_ane[iAc] = zero;
}
hypre_BoxLoop1End(iAc);
}
} /* end ForBoxI */
}
return hypre_error_flag;
}
|
GB_unaryop__lnot_fp64_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp64_int8
// op(A') function: GB_tran__lnot_fp64_int8
// C type: double
// A type: int8_t
// cast: double cij = (double) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = !(aij != 0) to every one of the anz entries of Ax (int8_t),
// writing the double result into Cx, statically partitioned over nthreads
// OpenMP threads. The per-entry work is GB_CAST_OP (cast then GB_OP),
// defined earlier in this generated file.
GrB_Info GB_unop__lnot_fp64_int8
(
double *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
// Kernel compiled out when the LNOT/FP64/INT8 combination is disabled.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose A, typecast int8_t -> double, and apply
// the logical-not operator. The traversal lives in GB_unaryop_transpose.c
// (phase 2 of 2), specialized by the GB_* macros above; Rowcounts, Iter,
// A_slice and naslice describe how A is sliced across threads — their exact
// semantics are defined by that shared template, not visible here.
GrB_Info GB_tran__lnot_fp64_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__ne_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_fp32)
// A*D function (colscale): GB (_AxD__ne_fp32)
// D*A function (rowscale): GB (_DxB__ne_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_fp32)
// C=scalar+B GB (_bind1st__ne_fp32)
// C=scalar+B' GB (_bind1st_tran__ne_fp32)
// C=A+scalar GB (_bind2nd__ne_fp32)
// C=A'+scalar GB (_bind2nd_tran__ne_fp32)
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_FP32 || GxB_NO_NE_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Dense C += A+B kernel: compiled out for NE_FP32 — the generator only
// emits this path for accumulable operators (see the note below), so the
// function name is the placeholder "(none)" and the body is dead code.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; for NE_FP32 the "+" is the
// entrywise comparison cij = (aij != bij). The loop is supplied by
// GB_dense_ewise3_noaccum_template.c, specialized via the GB_* macros above.
GrB_Info GB (_Cdense_ewise3_noaccum__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse B into a dense C. For NE_FP32 the template
// include is under "#if 0", so this is a stub that returns GrB_SUCCESS
// without touching C — presumably because NE is not usable as an
// accumulator; the generator disabled the body.
GrB_Info GB (_Cdense_accumB__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense C. Like the accumB variant
// above, the body is generated under "#if 0" for NE_FP32, so this is a
// no-op stub returning GrB_SUCCESS.
GrB_Info GB (_Cdense_accumb__ne_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale: each column j of A is combined with the diagonal
// entry of D via cij = (aij != djj). The work is done by
// GB_AxB_colscale_template.c over the A_ek_slicing task partition; only the
// typed output pointer Cx is set up here.
GrB_Info GB (_AxD__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale: each row i of B is combined with the diagonal entry
// of D via cij = (dii != bij). GB_AxB_rowscale_template.c supplies the
// loop; this wrapper only provides the typed Cx pointer.
GrB_Info GB (_DxB__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked by M): the result pattern is the
// union of A and B, with cij = (aij != bij) where both are present.
// GB_add_template.c performs the work; the three GB_WERK ek_slicing
// workspaces it may allocate are declared here and released by
// GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__ne_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked): the result pattern is the
// intersection of A and B, with cij = (aij != bij). The task-list-driven
// traversal lives in GB_emult_01_meta.c, specialized by the GB_* macros
// defined earlier in this generated file.
GrB_Info GB (_AemultB_01__ne_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hypersparse and B is bitmap/full.
// GB_BINOP_FLIP is 0 for NE (x != y is symmetric), so only the unflipped
// "#else" branch below is compiled; the flipxy argument is then irrelevant
// to the generated code.
GrB_Info GB (_AemultB_02__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B where the mask M is sparse/hypersparse and both A
// and B are bitmap/full. GB_emult_03_template.c iterates over M's pattern,
// sliced by M_ek_slicing across M_ntasks/M_nthreads.
GrB_Info GB (_AemultB_03__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: handles C=A.*B, C<M>=A.*B and
// C<!M>=A.*B via GB_bitmap_emult_template.c, with the method selected by
// ewise_method and the mask described by M/Mask_struct/Mask_comp.
GrB_Info GB (_AemultB_bitmap__ne_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for all bnz entries, with the scalar x bound as
// the first operand. Bb is B's bitmap: GBB skips absent entries when B is
// bitmap (Bb == NULL otherwise). The `false` passed to GBX is the iso
// flag — presumably B is never iso on this path; confirm against callers.
GrB_Info GB (_bind1st__ne_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for all anz entries, with the scalar y bound as
// the second operand. Mirror image of _bind1st above: Ab is A's bitmap
// (GBB skips absent entries), and GBX's `false` is the iso flag.
GrB_Info GB (_bind2nd__ne_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x != aij), using the
// GB_CAST_OP redefinition just above this function. GB_unop_transpose.c
// supplies the traversal over Workspaces / A_slice.
GrB_Info GB (_bind1st_tran__ne_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Generated boilerplate: restore GB_ATYPE (same definition here, since
// A and B share the float type for NE_FP32).
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij != y), via the
// GB_CAST_OP redefinition immediately above. The transpose traversal is
// GB_unop_transpose.c, driven by Workspaces / A_slice / nworkspaces.
GrB_Info GB (_bind2nd_tran__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
floram_util.c | #include "floram_util.h"
#include <omp.h>
#include <unistd.h>
// Fill buf with `bytes` random bytes read from /dev/urandom.
// Exits the process (status 1) if the device cannot be opened or fully
// read; the original dereferenced an unchecked fopen() result.
void get_random_bytes(void *buf, size_t bytes) {
  //only supported on recent linuxes, unfortunately.
  //getrandom(buf, bytes, 0);
  FILE *fp = fopen("/dev/urandom", "r");
  if (fp == NULL) {
    fprintf(stderr, "Could not open /dev/urandom.");
    exit(1);
  }
  if (fread(buf, 1, bytes, fp) != bytes) {
    fprintf(stderr, "Could not read random bytes.");
    fclose(fp);
    exit(1);
  }
  fclose(fp);
}
int floram_pma(void** dst, size_t alignment, size_t size) {
return posix_memalign(dst, alignment, size);
}
int floram_zpma(void** dst, size_t alignment, size_t size) {
int res = posix_memalign(dst, alignment, size);
memset(*dst, 0, size);
return res;
}
// Atomically read *x with relaxed memory ordering (no synchronization or
// ordering guarantees beyond atomicity of the load itself).
uint32_t floram_atomic_read(uint32_t * x) {
  uint32_t value;
  __atomic_load(x, &value, __ATOMIC_RELAXED);
  return value;
}
// Atomically increment *x by one with relaxed ordering. The original
// wrote `return __atomic_fetch_add(...)` — returning a value from a void
// function, which is a constraint violation in C and a hard error in C++;
// the fetched value is simply discarded here.
void floram_atomic_inc(uint32_t * x) {
  __atomic_fetch_add(x, 1, __ATOMIC_RELAXED);
}
int floram_usleep(uint64_t x) {
return usleep(x);
}
// Pick an OpenMP thread count sized to the working set: one core per
// CACHE_PER_CORE bytes of data (rounded up), clamped to
// [1, omp_get_num_procs()]. CACHE_PER_CORE, MIN and MAX are presumably
// defined in floram_util.h — not visible here. Compiles to a no-op when
// FLORAM_DISABLE_AUTO_THREAD_COUNT is defined.
void floram_set_procs_for_data_size(size_t dsize) {
#ifndef FLORAM_DISABLE_AUTO_THREAD_COUNT
size_t recommended_cores = (dsize + CACHE_PER_CORE - 1) / CACHE_PER_CORE;
size_t actual_cores = MIN(omp_get_num_procs(), MAX(1, recommended_cores));
omp_set_num_threads(actual_cores);
#endif
}
#ifdef __AES__
#include <wmmintrin.h>
#include <tmmintrin.h>
#define KE(NK,OK,RND) NK = OK; \
NK = _mm_xor_si128(NK, _mm_slli_si128(NK, 4)); \
NK = _mm_xor_si128(NK, _mm_slli_si128(NK, 4)); \
NK = _mm_xor_si128(NK, _mm_slli_si128(NK, 4)); \
OK = _mm_xor_si128(NK, _mm_shuffle_epi32(_mm_aeskeygenassist_si128(OK, RND), 0xff));
#define KE2(NK,OK,RND) NK = OK; \
NK = _mm_xor_si128(NK, _mm_slli_si128(NK, 4)); \
NK = _mm_xor_si128(NK, _mm_slli_si128(NK, 4)); \
NK = _mm_xor_si128(NK, _mm_slli_si128(NK, 4)); \
NK = _mm_xor_si128(NK, _mm_shuffle_epi32(_mm_aeskeygenassist_si128(OK, RND), 0xff));
// One-time PRG initialization. The AES-NI build needs no setup; this
// empty function exists to mirror the software-AES build's interface,
// whose version calls aes_init().
void offline_prg_init() {
}
// Expand the 16-byte AES-128 key at src into 11 round keys using the
// KE2 AESKEYGENASSIST macro defined above. src must be 16-byte aligned
// (it is read with _mm_load_si128). The schedule is heap-allocated and
// consumed by offline_prg()/offline_prg_oct(); the caller owns and frees
// it. Returns NULL if allocation fails — the original dereferenced an
// unchecked malloc() result.
void * offline_prg_keyschedule(uint8_t * src) {
  __m128i * r = malloc(11*sizeof(__m128i));
  if (r == NULL) return NULL;
  r[0] = _mm_load_si128((__m128i *) src);
  KE2(r[1], r[0], 0x01)
  KE2(r[2], r[1], 0x02)
  KE2(r[3], r[2], 0x04)
  KE2(r[4], r[3], 0x08)
  KE2(r[5], r[4], 0x10)
  KE2(r[6], r[5], 0x20)
  KE2(r[7], r[6], 0x40)
  KE2(r[8], r[7], 0x80)
  KE2(r[9], r[8], 0x1b)
  KE2(r[10], r[9], 0x36)
  return r;
}
// Fixed-key PRG evaluation: dest = AES128_k(src) XOR src, where ri is the
// 11-entry round-key schedule from offline_prg_keyschedule(). src must be
// 16-byte aligned (_mm_load_si128); dest may be unaligned (storeu).
// NOTE(review): the original named a local `or`, which is a reserved
// alternative token when compiled as C++ or with <iso646.h>; renamed to
// `orig` for portability — no behavior change.
void offline_prg(uint8_t * dest, uint8_t * src, void * ri) {
  __m128i orig, mr;
  __m128i * r = ri;
  orig = _mm_load_si128((__m128i *) src);
  mr = orig;
  mr = _mm_xor_si128(mr, r[0]);         // round 0: whitening
  mr = _mm_aesenc_si128(mr, r[1]);      // rounds 1..9
  mr = _mm_aesenc_si128(mr, r[2]);
  mr = _mm_aesenc_si128(mr, r[3]);
  mr = _mm_aesenc_si128(mr, r[4]);
  mr = _mm_aesenc_si128(mr, r[5]);
  mr = _mm_aesenc_si128(mr, r[6]);
  mr = _mm_aesenc_si128(mr, r[7]);
  mr = _mm_aesenc_si128(mr, r[8]);
  mr = _mm_aesenc_si128(mr, r[9]);
  mr = _mm_aesenclast_si128(mr, r[10]); // final round
  mr = _mm_xor_si128(mr, orig);         // XOR ciphertext with plaintext
  _mm_storeu_si128((__m128i*) dest, mr);
}
// Eight-block variant of offline_prg(): dest_i = AES128(src_i) XOR src_i
// for i = 1..8, with the eight AES pipelines hand-interleaved statement by
// statement — presumably to overlap aesenc latencies; confirm before
// restructuring. Unlike offline_prg(), every intermediate round state is
// stored through the dest pointers and the final XOR re-loads the sources,
// so this assumes dest_i does not alias src_i — verify against callers.
// All dest/src pointers must be 16-byte aligned (_mm_load_si128 and direct
// __m128i stores); ri_i are 11-entry schedules from offline_prg_keyschedule().
void offline_prg_oct(uint8_t * dest1, uint8_t * dest2, uint8_t * dest3, uint8_t * dest4,
uint8_t * dest5, uint8_t * dest6, uint8_t * dest7, uint8_t * dest8,
uint8_t * src1, uint8_t * src2, uint8_t * src3, uint8_t * src4,
uint8_t * src5, uint8_t * src6, uint8_t * src7, uint8_t * src8,
void * ri1, void * ri2 , void * ri3 , void * ri4,
void * ri5, void * ri6 , void * ri7 , void * ri8
) {
// View the destination buffers as 128-bit lanes used as working state.
__m128i * mr1 = dest1;
__m128i * mr2 = dest2;
__m128i * mr3 = dest3;
__m128i * mr4 = dest4;
__m128i * mr5 = dest5;
__m128i * mr6 = dest6;
__m128i * mr7 = dest7;
__m128i * mr8 = dest8;
__m128i * r1 = ri1;
__m128i * r2 = ri2;
__m128i * r3 = ri3;
__m128i * r4 = ri4;
__m128i * r5 = ri5;
__m128i * r6 = ri6;
__m128i * r7 = ri7;
__m128i * r8 = ri8;
// Load the eight source blocks.
*mr1 = _mm_load_si128((__m128i *) src1);
*mr2 = _mm_load_si128((__m128i *) src2);
*mr3 = _mm_load_si128((__m128i *) src3);
*mr4 = _mm_load_si128((__m128i *) src4);
*mr5 = _mm_load_si128((__m128i *) src5);
*mr6 = _mm_load_si128((__m128i *) src6);
*mr7 = _mm_load_si128((__m128i *) src7);
*mr8 = _mm_load_si128((__m128i *) src8);
// Round 0: whitening with round key 0.
*mr1 = _mm_xor_si128(*mr1, r1[0]);
*mr2 = _mm_xor_si128(*mr2, r2[0]);
*mr3 = _mm_xor_si128(*mr3, r3[0]);
*mr4 = _mm_xor_si128(*mr4, r4[0]);
*mr5 = _mm_xor_si128(*mr5, r5[0]);
*mr6 = _mm_xor_si128(*mr6, r6[0]);
*mr7 = _mm_xor_si128(*mr7, r7[0]);
*mr8 = _mm_xor_si128(*mr8, r8[0]);
// Rounds 1..9: one aesenc per stream per round, streams interleaved.
*mr1 = _mm_aesenc_si128(*mr1, r1[1]);
*mr2 = _mm_aesenc_si128(*mr2, r2[1]);
*mr3 = _mm_aesenc_si128(*mr3, r3[1]);
*mr4 = _mm_aesenc_si128(*mr4, r4[1]);
*mr5 = _mm_aesenc_si128(*mr5, r5[1]);
*mr6 = _mm_aesenc_si128(*mr6, r6[1]);
*mr7 = _mm_aesenc_si128(*mr7, r7[1]);
*mr8 = _mm_aesenc_si128(*mr8, r8[1]);
*mr1 = _mm_aesenc_si128(*mr1, r1[2]);
*mr2 = _mm_aesenc_si128(*mr2, r2[2]);
*mr3 = _mm_aesenc_si128(*mr3, r3[2]);
*mr4 = _mm_aesenc_si128(*mr4, r4[2]);
*mr5 = _mm_aesenc_si128(*mr5, r5[2]);
*mr6 = _mm_aesenc_si128(*mr6, r6[2]);
*mr7 = _mm_aesenc_si128(*mr7, r7[2]);
*mr8 = _mm_aesenc_si128(*mr8, r8[2]);
*mr1 = _mm_aesenc_si128(*mr1, r1[3]);
*mr2 = _mm_aesenc_si128(*mr2, r2[3]);
*mr3 = _mm_aesenc_si128(*mr3, r3[3]);
*mr4 = _mm_aesenc_si128(*mr4, r4[3]);
*mr5 = _mm_aesenc_si128(*mr5, r5[3]);
*mr6 = _mm_aesenc_si128(*mr6, r6[3]);
*mr7 = _mm_aesenc_si128(*mr7, r7[3]);
*mr8 = _mm_aesenc_si128(*mr8, r8[3]);
*mr1 = _mm_aesenc_si128(*mr1, r1[4]);
*mr2 = _mm_aesenc_si128(*mr2, r2[4]);
*mr3 = _mm_aesenc_si128(*mr3, r3[4]);
*mr4 = _mm_aesenc_si128(*mr4, r4[4]);
*mr5 = _mm_aesenc_si128(*mr5, r5[4]);
*mr6 = _mm_aesenc_si128(*mr6, r6[4]);
*mr7 = _mm_aesenc_si128(*mr7, r7[4]);
*mr8 = _mm_aesenc_si128(*mr8, r8[4]);
*mr1 = _mm_aesenc_si128(*mr1, r1[5]);
*mr2 = _mm_aesenc_si128(*mr2, r2[5]);
*mr3 = _mm_aesenc_si128(*mr3, r3[5]);
*mr4 = _mm_aesenc_si128(*mr4, r4[5]);
*mr5 = _mm_aesenc_si128(*mr5, r5[5]);
*mr6 = _mm_aesenc_si128(*mr6, r6[5]);
*mr7 = _mm_aesenc_si128(*mr7, r7[5]);
*mr8 = _mm_aesenc_si128(*mr8, r8[5]);
*mr1 = _mm_aesenc_si128(*mr1, r1[6]);
*mr2 = _mm_aesenc_si128(*mr2, r2[6]);
*mr3 = _mm_aesenc_si128(*mr3, r3[6]);
*mr4 = _mm_aesenc_si128(*mr4, r4[6]);
*mr5 = _mm_aesenc_si128(*mr5, r5[6]);
*mr6 = _mm_aesenc_si128(*mr6, r6[6]);
*mr7 = _mm_aesenc_si128(*mr7, r7[6]);
*mr8 = _mm_aesenc_si128(*mr8, r8[6]);
*mr1 = _mm_aesenc_si128(*mr1, r1[7]);
*mr2 = _mm_aesenc_si128(*mr2, r2[7]);
*mr3 = _mm_aesenc_si128(*mr3, r3[7]);
*mr4 = _mm_aesenc_si128(*mr4, r4[7]);
*mr5 = _mm_aesenc_si128(*mr5, r5[7]);
*mr6 = _mm_aesenc_si128(*mr6, r6[7]);
*mr7 = _mm_aesenc_si128(*mr7, r7[7]);
*mr8 = _mm_aesenc_si128(*mr8, r8[7]);
*mr1 = _mm_aesenc_si128(*mr1, r1[8]);
*mr2 = _mm_aesenc_si128(*mr2, r2[8]);
*mr3 = _mm_aesenc_si128(*mr3, r3[8]);
*mr4 = _mm_aesenc_si128(*mr4, r4[8]);
*mr5 = _mm_aesenc_si128(*mr5, r5[8]);
*mr6 = _mm_aesenc_si128(*mr6, r6[8]);
*mr7 = _mm_aesenc_si128(*mr7, r7[8]);
*mr8 = _mm_aesenc_si128(*mr8, r8[8]);
*mr1 = _mm_aesenc_si128(*mr1, r1[9]);
*mr2 = _mm_aesenc_si128(*mr2, r2[9]);
*mr3 = _mm_aesenc_si128(*mr3, r3[9]);
*mr4 = _mm_aesenc_si128(*mr4, r4[9]);
*mr5 = _mm_aesenc_si128(*mr5, r5[9]);
*mr6 = _mm_aesenc_si128(*mr6, r6[9]);
*mr7 = _mm_aesenc_si128(*mr7, r7[9]);
*mr8 = _mm_aesenc_si128(*mr8, r8[9]);
// Final round.
*mr1 = _mm_aesenclast_si128(*mr1, r1[10]);
*mr2 = _mm_aesenclast_si128(*mr2, r2[10]);
*mr3 = _mm_aesenclast_si128(*mr3, r3[10]);
*mr4 = _mm_aesenclast_si128(*mr4, r4[10]);
*mr5 = _mm_aesenclast_si128(*mr5, r5[10]);
*mr6 = _mm_aesenclast_si128(*mr6, r6[10]);
*mr7 = _mm_aesenclast_si128(*mr7, r7[10]);
*mr8 = _mm_aesenclast_si128(*mr8, r8[10]);
// XOR with the re-loaded source blocks (assumes src != dest, see above).
*mr1 = _mm_xor_si128(*mr1, _mm_load_si128((__m128i *) src1));
*mr2 = _mm_xor_si128(*mr2, _mm_load_si128((__m128i *) src2));
*mr3 = _mm_xor_si128(*mr3, _mm_load_si128((__m128i *) src3));
*mr4 = _mm_xor_si128(*mr4, _mm_load_si128((__m128i *) src4));
*mr5 = _mm_xor_si128(*mr5, _mm_load_si128((__m128i *) src5));
*mr6 = _mm_xor_si128(*mr6, _mm_load_si128((__m128i *) src6));
*mr7 = _mm_xor_si128(*mr7, _mm_load_si128((__m128i *) src7));
*mr8 = _mm_xor_si128(*mr8, _mm_load_si128((__m128i *) src8));
}
// Counter-mode expansion with AES-NI: block li of dest (n 16-byte blocks)
// is AES128_seed(counter li+i), with the counter byte-swapped to match the
// software build's big-endian htonll layout. The key schedule is built
// inline in registers (r1..r10 via KE2), then the main loop is 4-way
// unrolled and parallelized with OpenMP; a scalar tail loop handles the
// last n%4 blocks. src must be 16-byte aligned (_mm_load_si128).
void offline_expand_from(uint8_t * dest, uint8_t * src, size_t i, size_t n) {
// this version handles the case when n!=2 using a loop
__m128i seed;
seed = _mm_load_si128((__m128i *) src);
__m128i r1,r2,r3,r4,r5,r6,r7,r8,r9,r10; // next key
__m128i mr, ok;
ok = seed;
KE2(r1, ok, 0x01)
KE2(r2, r1, 0x02)
KE2(r3, r2, 0x04)
KE2(r4, r3, 0x08)
KE2(r5, r4, 0x10)
KE2(r6, r5, 0x20)
KE2(r7, r6, 0x40)
KE2(r8, r7, 0x80)
KE2(r9, r8, 0x1b)
KE2(r10, r9, 0x36)
// Byte-reversal shuffle mask: presumably converts the little-endian
// counter to the big-endian layout used by the non-AES build — confirm.
__m128i mask = _mm_set_epi64((__m64)0x08090a0b0c0d0e0fULL, (__m64)0x0001020304050607ULL );
// Size the OpenMP thread count to the output (BLOCKSIZE from the header).
floram_set_procs_for_data_size(n*BLOCKSIZE);
#pragma omp parallel for schedule(guided)
for(size_t li=0; li<n-n%4; li+=4) {
__m128i mr1, mr2, mr3, mr4;
// Counters li+i .. li+i+3 in the high 64-bit lane.
mr1 = _mm_set_epi64((__m64)(li+i),(__m64)0l);
mr2 = _mm_set_epi64((__m64)(li+i+1),(__m64)0l);
mr3 = _mm_set_epi64((__m64)(li+i+2),(__m64)0l);
mr4 = _mm_set_epi64((__m64)(li+i+3),(__m64)0l);
mr1 = _mm_shuffle_epi8 (mr1, mask);
mr2 = _mm_shuffle_epi8 (mr2, mask);
mr3 = _mm_shuffle_epi8 (mr3, mask);
mr4 = _mm_shuffle_epi8 (mr4, mask);
// Round 0 (whitening with the raw seed), then rounds 1..9 and the
// final round, interleaved across the four blocks.
mr1 = _mm_xor_si128(mr1, ok);
mr2 = _mm_xor_si128(mr2, ok);
mr3 = _mm_xor_si128(mr3, ok);
mr4 = _mm_xor_si128(mr4, ok);
mr1 = _mm_aesenc_si128(mr1, r1);
mr2 = _mm_aesenc_si128(mr2, r1);
mr3 = _mm_aesenc_si128(mr3, r1);
mr4 = _mm_aesenc_si128(mr4, r1);
mr1 = _mm_aesenc_si128(mr1, r2);
mr2 = _mm_aesenc_si128(mr2, r2);
mr3 = _mm_aesenc_si128(mr3, r2);
mr4 = _mm_aesenc_si128(mr4, r2);
mr1 = _mm_aesenc_si128(mr1, r3);
mr2 = _mm_aesenc_si128(mr2, r3);
mr3 = _mm_aesenc_si128(mr3, r3);
mr4 = _mm_aesenc_si128(mr4, r3);
mr1 = _mm_aesenc_si128(mr1, r4);
mr2 = _mm_aesenc_si128(mr2, r4);
mr3 = _mm_aesenc_si128(mr3, r4);
mr4 = _mm_aesenc_si128(mr4, r4);
mr1 = _mm_aesenc_si128(mr1, r5);
mr2 = _mm_aesenc_si128(mr2, r5);
mr3 = _mm_aesenc_si128(mr3, r5);
mr4 = _mm_aesenc_si128(mr4, r5);
mr1 = _mm_aesenc_si128(mr1, r6);
mr2 = _mm_aesenc_si128(mr2, r6);
mr3 = _mm_aesenc_si128(mr3, r6);
mr4 = _mm_aesenc_si128(mr4, r6);
mr1 = _mm_aesenc_si128(mr1, r7);
mr2 = _mm_aesenc_si128(mr2, r7);
mr3 = _mm_aesenc_si128(mr3, r7);
mr4 = _mm_aesenc_si128(mr4, r7);
mr1 = _mm_aesenc_si128(mr1, r8);
mr2 = _mm_aesenc_si128(mr2, r8);
mr3 = _mm_aesenc_si128(mr3, r8);
mr4 = _mm_aesenc_si128(mr4, r8);
mr1 = _mm_aesenc_si128(mr1, r9);
mr2 = _mm_aesenc_si128(mr2, r9);
mr3 = _mm_aesenc_si128(mr3, r9);
mr4 = _mm_aesenc_si128(mr4, r9);
mr1 = _mm_aesenclast_si128(mr1, r10);
mr2 = _mm_aesenclast_si128(mr2, r10);
mr3 = _mm_aesenclast_si128(mr3, r10);
mr4 = _mm_aesenclast_si128(mr4, r10);
uint8_t* pp1 = dest+(li*16);
uint8_t* pp2 = dest+((li+1)*16);
uint8_t* pp3 = dest+((li+2)*16);
uint8_t* pp4 = dest+((li+3)*16);
_mm_storeu_si128((__m128i*) pp1, mr1);
_mm_storeu_si128((__m128i*) pp2, mr2);
_mm_storeu_si128((__m128i*) pp3, mr3);
_mm_storeu_si128((__m128i*) pp4, mr4);
}
// Scalar tail: the last n%4 blocks, same pipeline one block at a time.
for(size_t li = n-n%4; li<n; li++) {
mr = _mm_set_epi64((__m64)(li+i),(__m64)0l); // msg = li
mr = _mm_shuffle_epi8 (mr, mask);
mr = _mm_xor_si128(mr, ok); // round 0
mr = _mm_aesenc_si128(mr, r1);
mr = _mm_aesenc_si128(mr, r2);
mr = _mm_aesenc_si128(mr, r3);
mr = _mm_aesenc_si128(mr, r4);
mr = _mm_aesenc_si128(mr, r5);
mr = _mm_aesenc_si128(mr, r6);
mr = _mm_aesenc_si128(mr, r7);
mr = _mm_aesenc_si128(mr, r8);
mr = _mm_aesenc_si128(mr, r9);
mr = _mm_aesenclast_si128(mr, r10);
uint8_t* pp = dest+(li*16);
_mm_storeu_si128((__m128i*) pp, mr);
}
}
#else //__AES__
#include "aes_gladman/aes.h"
#define htonll(x) ((1==htonl(1)) ? (x) : ((uint64_t)htonl((x) & 0xFFFFFFFF) << 32) | htonl((x) >> 32))
// One-time PRG initialization for the software-AES fallback: build the
// Gladman AES lookup tables. Must be called before the other offline_*
// functions in this branch.
void offline_prg_init() {
aes_init();
}
// Software-AES key schedule: expand the 16-byte key at src into the
// 11*16-byte round-key blob consumed by offline_prg(). The caller owns
// and frees the returned buffer. Returns NULL if allocation fails — the
// original dereferenced an unchecked malloc() result (mirrors the fix in
// the AES-NI variant).
void * offline_prg_keyschedule(uint8_t * src) {
  uint8_t * r = malloc(11*16);
  if (r == NULL) return NULL;
  aes_encrypt_ctx cx = {0};
  aes_encrypt_key128(src, &cx);
  memcpy(r, cx.ks, 11*16);
  return r;
}
// Software-AES PRG: dest = AES128_k(src) XOR src. ri is the 11*16-byte
// schedule from offline_prg_keyschedule(), copied into a fresh Gladman
// context on each call. The cx.inf assignments presumably encode "10
// rounds of 16 bytes" as expected by aes_encrypt — confirm against
// aes_gladman/aes.h.
void offline_prg(uint8_t * dest, uint8_t * src, void * ri) {
aes_encrypt_ctx cx = {0};
memcpy(cx.ks, ri, 11*16);
cx.inf.l = 0;
cx.inf.b[0] = 10 * 16;
aes_encrypt(src, dest, &cx);
// Final XOR of ciphertext with plaintext, two 64-bit lanes at a time.
#pragma omp simd
for (uint8_t ii = 0; ii < 2; ii++) {
((uint64_t *) dest)[ii] ^= ((uint64_t *) src)[ii];
}
}
// Eight-block PRG for the software-AES build. There is no wide software
// path, so the eight independent evaluations are simply run one after
// another, in the same order as before, via offline_prg().
void offline_prg_oct(uint8_t * dest1, uint8_t * dest2, uint8_t * dest3, uint8_t * dest4,
                     uint8_t * dest5, uint8_t * dest6, uint8_t * dest7, uint8_t * dest8,
                     uint8_t * src1, uint8_t * src2, uint8_t * src3, uint8_t * src4,
                     uint8_t * src5, uint8_t * src6, uint8_t * src7, uint8_t * src8,
                     void * ri1, void * ri2 , void * ri3 , void * ri4,
                     void * ri5, void * ri6 , void * ri7 , void * ri8
                    ) {
  uint8_t *dests[8] = { dest1, dest2, dest3, dest4, dest5, dest6, dest7, dest8 };
  uint8_t *srcs[8]  = { src1,  src2,  src3,  src4,  src5,  src6,  src7,  src8  };
  void    *keys[8]  = { ri1,   ri2,   ri3,   ri4,   ri5,   ri6,   ri7,   ri8   };
  for (int idx = 0; idx < 8; idx++) {
    offline_prg(dests[idx], srcs[idx], keys[idx]);
  }
}
// Software-AES counter-mode expansion: block li of dest is
// AES128_src(big-endian counter li+i), matching the AES-NI build's output
// layout via htonll. The key schedule is built once, copied into a local
// context shared read-only by the OpenMP workers, and freed immediately.
// BLOCKSIZE is presumably 16 and defined in floram_util.h — confirm.
void offline_expand_from(uint8_t * dest, uint8_t * src, size_t i, size_t n) {
uint8_t * key = offline_prg_keyschedule(src);
aes_encrypt_ctx cx = {0};
memcpy(cx.ks, key, 11*16);
cx.inf.l = 0;
cx.inf.b[0] = 10 * 16;
free(key);
// Size the OpenMP thread pool to the amount of output produced.
floram_set_procs_for_data_size(n*BLOCKSIZE);
#pragma omp parallel for schedule(guided)
for(size_t li=0; li<n; li++) {
// Counter block: zero low half, big-endian index in the high half.
uint64_t iv[2] = {0,htonll(li+i)};
aes_encrypt(iv,&dest[li*16],&cx);
}
}
#endif //__AES__
// Expand the 16-byte seed at src into n pseudorandom 16-byte blocks at
// dest, starting from counter index 0. Thin wrapper shared by both the
// AES-NI and software-AES builds.
void offline_expand(uint8_t * dest, uint8_t * src, size_t n) {
offline_expand_from(dest, src, 0, n);
}
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: run the 7-point stencil TESTS times and report the
 * best wall time. Usage: ./3d7pt Nx Ny Nz [Nt].
 *
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when too few arguments were
 *    given; they now have well-defined defaults.
 *  - The grid was initialized only from index 1, yet the stencil reads
 *    index 0 (i-1/j-1/k-1 at the low boundary); initialization now covers
 *    the whole grid, removing reads of uninitialized doubles.
 *  - min_tdiff was updated with an undefined lowercase `min`; the MIN
 *    macro defined at the top of this file is used instead.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Defaults (32 interior points + 2 halo, 10 time steps) apply when the
   * corresponding command-line arguments are absent. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time planes (ping-pong buffers) of a Nz x Ny x Nx grid. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 24;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  /* Initialize the WHOLE grid (including the boundary planes the stencil
   * reads) with reproducible pseudo-random values. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  /* num_threads is presumably consumed by PRINT_RESULTS (print_utils.h). */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                        A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Keep the best run. The original called an undefined lowercase
     * min(); use the MIN macro defined at the top of this file. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /*  for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
          free(A[0][i][j]);
          free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
      }
      free(A[0]);
      free(A[1]);
  */
  return 0;
}
|
generator_gemm_common.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/libxsmm/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke, Evangelos Georganas (Intel Corp.)
******************************************************************************/
#include "generator_gemm_common.h"
#include "generator_common.h"
#include "generator_x86_instructions.h"
#include "libxsmm_main.h"
#include "generator_common_x86.h"
LIBXSMM_API_INTERN
/* Emits instructions that apply ReLU in place to inout_vreg, using
 * zero_vreg as the zero constant (the caller must have zeroed it, see
 * libxsmm_generator_gemm_prepare_relu_fusion). When store_bitmask == 1 a
 * per-lane "activation was positive" bitmask is additionally written to
 * memory at [gpr_bitmask + store_bitmask_offset]. aux_gpr and aux_vreg
 * are scratch, used only on the pre-AVX512 bitmask path. Pre-AVX512 only
 * the 32-bit (FP32) flavor is supported; a 16-bit request raises
 * LIBXSMM_ERR_UNSUP_DATATYPE. */
void libxsmm_generator_gemm_apply_relu_to_vreg( libxsmm_generated_code* io_generated_code,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    const unsigned int zero_vreg,
    const unsigned int inout_vreg,
    const unsigned int store_bitmask,
    const unsigned int gpr_bitmask,
    const unsigned int store_bitmask_offset,
    const unsigned int is_32_bit_relu,
    const unsigned int aux_gpr,
    const unsigned int aux_vreg) {
  if (io_generated_code->arch < LIBXSMM_X86_AVX512) {
    if (is_32_bit_relu == 1) {
      if (store_bitmask == 1) {
        /* aux_vreg = lane-wise compare of zero_vreg vs. inout_vreg with
         * predicate imm8 6 -- presumably NLE ("greater than"), i.e. a
         * mask of the positive lanes; confirm against the VCMPPS
         * predicate table. */
        libxsmm_x86_instruction_vec_compute_3reg_imm8( io_generated_code, LIBXSMM_X86_INSTR_VCMPPS, i_micro_kernel_config->vector_name, zero_vreg, inout_vreg, aux_vreg, 6 );
        /* pack the per-lane MSBs of the compare result into aux_gpr */
        libxsmm_x86_instruction_vec_compute_3reg_imm8( io_generated_code, LIBXSMM_X86_INSTR_VMOVMSKPS, i_micro_kernel_config->vector_name, aux_vreg, LIBXSMM_X86_VEC_REG_UNDEF, aux_gpr, 0 );
        /* store the low byte of the mask (one bit per FP32 lane) */
        libxsmm_x86_instruction_alu_mem( io_generated_code, LIBXSMM_X86_INSTR_MOVB, gpr_bitmask, LIBXSMM_X86_GP_REG_UNDEF, 0, store_bitmask_offset, aux_gpr, 1);
      }
      /* ReLU proper: inout = max(inout, 0) */
      libxsmm_x86_instruction_vec_compute_3reg( io_generated_code, LIBXSMM_X86_INSTR_VMAXPS, i_micro_kernel_config->vector_name, inout_vreg, zero_vreg, inout_vreg );
    } else {
      /* shouldn't happen: 16-bit ReLU is not supported pre-AVX512 */
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_UNSUP_DATATYPE );
      return;
    }
  } else {
    if (store_bitmask == 0) {
      /* plain max-with-zero: FP32 max or signed 16-bit integer max */
      libxsmm_x86_instruction_vec_compute_3reg( io_generated_code,
          (is_32_bit_relu == 1) ? LIBXSMM_X86_INSTR_VMAXPS : LIBXSMM_X86_INSTR_VPMAXSW,
          i_micro_kernel_config->vector_name,
          inout_vreg,
          zero_vreg,
          inout_vreg);
    } else {
      /* k7 is used as a temporary ReLU mask register */
      unsigned int current_mask_reg = 7;
      /* compare (predicate imm8 6) into the mask register */
      libxsmm_x86_instruction_vec_compute_3reg_imm8( io_generated_code,
          (is_32_bit_relu == 1) ? LIBXSMM_X86_INSTR_VCMPPS : LIBXSMM_X86_INSTR_VPCMPW,
          i_micro_kernel_config->vector_name,
          zero_vreg,
          inout_vreg,
          current_mask_reg, 6 );
      /* Blend output result with zero reg based on relu mask */
      libxsmm_x86_instruction_vec_compute_3reg_mask( io_generated_code,
          (is_32_bit_relu == 1) ? LIBXSMM_X86_INSTR_VPBLENDMD : LIBXSMM_X86_INSTR_VPBLENDMW,
          i_micro_kernel_config->vector_name,
          inout_vreg,
          zero_vreg,
          inout_vreg,
          current_mask_reg,
          0 );
      /* Store bitmask: KMOVW (16 bits) for FP32 lanes, KMOVD (32 bits)
       * for 16-bit lanes */
      libxsmm_x86_instruction_mask_move_mem( io_generated_code,
          (is_32_bit_relu == 1) ? LIBXSMM_X86_INSTR_KMOVW_ST : LIBXSMM_X86_INSTR_KMOVD_ST,
          gpr_bitmask,
          LIBXSMM_X86_GP_REG_UNDEF,
          0,
          store_bitmask_offset,
          current_mask_reg );
    }
  }
}
LIBXSMM_API_INTERN
/* Loads accumulator slot `in_vreg` from the GEMM scratch buffer (each
 * accumulator occupies a 64-byte, ZMM-sized slot) into `out_vreg` and
 * applies a sigmoid to it via the rational 7/8 Pade approximation
 * helpers. The coefficient vregs and mask registers referenced from
 * i_micro_kernel_config_mod must have been populated beforehand (see
 * libxsmm_generator_gemm_prepare_coeffs_sigmoid_ps_rational_78_avx_avx512). */
void libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( libxsmm_generated_code* io_generated_code,
    libxsmm_micro_kernel_config* i_micro_kernel_config_mod,
    const unsigned int scratch_gpr,
    const unsigned int in_vreg,
    const unsigned int out_vreg ) {
  /* Load accumulator from scratch (scratch_gpr holds the scratch base,
   * in_vreg * 64 selects the slot) */
  libxsmm_x86_instruction_vec_move( io_generated_code,
      i_micro_kernel_config_mod->instruction_set,
      LIBXSMM_X86_INSTR_VMOVUPS,
      scratch_gpr,
      LIBXSMM_X86_GP_REG_UNDEF, 0,
      in_vreg * 64,
      i_micro_kernel_config_mod->vector_name,
      out_vreg, 0, 1, 0 );
  /* Apply sigmoid; the AVX512 variant additionally uses two mask
   * registers and the 0.5 constant */
  if (io_generated_code->arch >= LIBXSMM_X86_AVX512) {
    libxsmm_generator_sigmoid_ps_rational_78_avx512( io_generated_code, out_vreg, i_micro_kernel_config_mod->vec_x2,
        i_micro_kernel_config_mod->vec_nom, i_micro_kernel_config_mod->vec_denom,
        i_micro_kernel_config_mod->mask_hi, i_micro_kernel_config_mod->mask_lo,
        i_micro_kernel_config_mod->vec_c0, i_micro_kernel_config_mod->vec_c1, i_micro_kernel_config_mod->vec_c2, i_micro_kernel_config_mod->vec_c3,
        i_micro_kernel_config_mod->vec_c1_d, i_micro_kernel_config_mod->vec_c2_d, i_micro_kernel_config_mod->vec_c3_d,
        i_micro_kernel_config_mod->vec_hi_bound, i_micro_kernel_config_mod->vec_lo_bound, i_micro_kernel_config_mod->vec_ones,
        i_micro_kernel_config_mod->vec_neg_ones, i_micro_kernel_config_mod->vec_halves );
  } else {
    libxsmm_generator_sigmoid_ps_rational_78_avx( io_generated_code, out_vreg, i_micro_kernel_config_mod->vec_x2,
        i_micro_kernel_config_mod->vec_nom, i_micro_kernel_config_mod->vec_denom,
        i_micro_kernel_config_mod->vec_c0, i_micro_kernel_config_mod->vec_c1, i_micro_kernel_config_mod->vec_c2, i_micro_kernel_config_mod->vec_c3,
        i_micro_kernel_config_mod->vec_c1_d, i_micro_kernel_config_mod->vec_c2_d, i_micro_kernel_config_mod->vec_c3_d,
        i_micro_kernel_config_mod->vec_hi_bound, i_micro_kernel_config_mod->vec_lo_bound, i_micro_kernel_config_mod->vec_ones,
        i_micro_kernel_config_mod->vec_neg_ones);
  }
}
LIBXSMM_API_INTERN
/* Reloads the whole 2D accumulator register block from the GEMM scratch
 * area: accumulator register r is read back from byte offset r * 64
 * (one ZMM-sized slot per register) relative to scratch_gpr. */
void libxsmm_generator_gemm_restore_2D_regblock_from_scratch( libxsmm_generated_code* io_generated_code,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    const unsigned int scratch_gpr,
    const unsigned int l_vec_reg_acc_start,
    const unsigned int l_m_blocking,
    const unsigned int i_n_blocking) {
  unsigned int l_col, l_row;
  for ( l_col = 0; l_col < i_n_blocking; l_col++ ) {
    for ( l_row = 0; l_row < l_m_blocking; l_row++ ) {
      /* accumulator registers are laid out column-major: m fastest */
      const unsigned int l_acc_reg = l_vec_reg_acc_start + l_row + (l_m_blocking * l_col);
      libxsmm_x86_instruction_vec_move( io_generated_code,
          i_micro_kernel_config->instruction_set,
          LIBXSMM_X86_INSTR_VMOVUPS,
          scratch_gpr,
          LIBXSMM_X86_GP_REG_UNDEF, 0,
          l_acc_reg * 64,
          i_micro_kernel_config->vector_name,
          l_acc_reg, 0, 0, 0 );
    }
  }
}
LIBXSMM_API_INTERN
/* Spills the whole 2D accumulator register block to the GEMM scratch
 * area: accumulator register r is written to byte offset r * 64 (one
 * ZMM-sized slot per register) relative to scratch_gpr. Mirror image of
 * libxsmm_generator_gemm_restore_2D_regblock_from_scratch. */
void libxsmm_generator_gemm_store_2D_regblock_to_scratch( libxsmm_generated_code* io_generated_code,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    const unsigned int scratch_gpr,
    const unsigned int l_vec_reg_acc_start,
    const unsigned int l_m_blocking,
    const unsigned int i_n_blocking) {
  unsigned int l_col, l_row;
  for ( l_col = 0; l_col < i_n_blocking; l_col++ ) {
    for ( l_row = 0; l_row < l_m_blocking; l_row++ ) {
      /* accumulator registers are laid out column-major: m fastest */
      const unsigned int l_acc_reg = l_vec_reg_acc_start + l_row + (l_m_blocking * l_col);
      libxsmm_x86_instruction_vec_move( io_generated_code,
          i_micro_kernel_config->instruction_set,
          LIBXSMM_X86_INSTR_VMOVUPS,
          scratch_gpr,
          LIBXSMM_X86_GP_REG_UNDEF, 0,
          l_acc_reg * 64,
          i_micro_kernel_config->vector_name,
          l_acc_reg, 0, 0, 1 );
    }
  }
}
LIBXSMM_API_INTERN
/* Spills the full 2D accumulator register block to the GEMM scratch area
 * and then loads the sigmoid coefficient registers so they can be reused
 * for every accumulator. Pushes scratch_gpr and aux_gpr; the matching
 * pops are done by libxsmm_generator_gemm_cleanup_sigmoid_fusion. */
void libxsmm_generator_gemm_dump_2D_block_and_prepare_sigmoid_fusion( libxsmm_generated_code* io_generated_code,
    libxsmm_micro_kernel_config* i_micro_kernel_config,
    const unsigned int l_vec_reg_acc_start,
    const unsigned int l_m_blocking,
    const unsigned int i_n_blocking,
    const unsigned int scratch_gpr,
    const unsigned int aux_gpr) {
  /* vector registers available to the coefficient allocator: 32 ZMMs on
   * AVX512, 16 YMMs otherwise */
  unsigned int n_avail_vregs = (io_generated_code->arch >= LIBXSMM_X86_AVX512) ? 32 : 16;
  /* NOTE(review): AVX512 has 8 k-mask registers; pre-AVX512 has none, so
   * the meaning of 16 on that path should be confirmed (presumably
   * unused by the AVX fallback). */
  unsigned int n_avail_masks = (io_generated_code->arch >= LIBXSMM_X86_AVX512) ? 8 : 16;
  /* First dump the accumulators to scratch and then setup sigmoid coefficients to be reused */
  libxsmm_x86_instruction_push_reg( io_generated_code, scratch_gpr);
  libxsmm_x86_instruction_push_reg( io_generated_code, aux_gpr );
  libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_GEMM_SCRATCH_PTR, scratch_gpr);
  libxsmm_generator_gemm_store_2D_regblock_to_scratch( io_generated_code, i_micro_kernel_config,
      scratch_gpr, l_vec_reg_acc_start, l_m_blocking, i_n_blocking);
  libxsmm_generator_gemm_prepare_coeffs_sigmoid_ps_rational_78_avx_avx512( io_generated_code, i_micro_kernel_config, n_avail_vregs, n_avail_masks, aux_gpr );
}
LIBXSMM_API_INTERN
/* Prepares ReLU fusion for the store phase: zeroes zero_vreg (xor with
 * itself) so it can serve as the ReLU threshold, and, when a bitmask is
 * to be stored, saves bitmask_gpr (plus aux_gpr on pre-AVX512, which
 * needs an extra scratch GPR) and loads the eltwise output pointer from
 * the stack frame into bitmask_gpr. Mirrored by
 * libxsmm_generator_gemm_cleanup_relu_fusion. */
void libxsmm_generator_gemm_prepare_relu_fusion( libxsmm_generated_code* io_generated_code,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    const unsigned int zero_vreg,
    const unsigned int store_bitmask,
    const unsigned int bitmask_gpr,
    const unsigned int aux_gpr) {
  /* Zero out register 0 to perform relu */
  libxsmm_x86_instruction_vec_compute_3reg( io_generated_code,
      i_micro_kernel_config->vxor_instruction,
      i_micro_kernel_config->vector_name,
      zero_vreg, zero_vreg, zero_vreg);
  if (store_bitmask == 1) {
    libxsmm_x86_instruction_push_reg( io_generated_code, bitmask_gpr );
    if (io_generated_code->arch < LIBXSMM_X86_AVX512) {
      /* pre-AVX512 additionally clobbers aux_gpr in
       * libxsmm_generator_gemm_apply_relu_to_vreg, so save it too */
      libxsmm_x86_instruction_push_reg( io_generated_code, aux_gpr );
    }
    /* bitmask destination is the eltwise output pointer stack variable */
    libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, bitmask_gpr );
  }
}
LIBXSMM_API_INTERN
/* Restores the GPRs saved by libxsmm_generator_gemm_prepare_relu_fusion.
 * Nothing was pushed unless a bitmask store was requested; pops happen
 * in the reverse order of the pushes. */
void libxsmm_generator_gemm_cleanup_relu_fusion( libxsmm_generated_code* io_generated_code,
    const unsigned int store_bitmask,
    const unsigned int bitmask_gpr,
    const unsigned int aux_gpr) {
  if (store_bitmask != 1) {
    return;
  }
  /* aux_gpr was only pushed on the pre-AVX512 path */
  if (io_generated_code->arch < LIBXSMM_X86_AVX512) {
    libxsmm_x86_instruction_pop_reg( io_generated_code, aux_gpr );
  }
  libxsmm_x86_instruction_pop_reg( io_generated_code, bitmask_gpr );
}
LIBXSMM_API_INTERN
/* Undoes the GPR pushes performed by
 * libxsmm_generator_gemm_dump_2D_block_and_prepare_sigmoid_fusion
 * (pop order is the reverse of the push order). */
void libxsmm_generator_gemm_cleanup_sigmoid_fusion( libxsmm_generated_code* io_generated_code,
    const unsigned int scratch_gpr,
    const unsigned int aux_gpr ) {
  libxsmm_x86_instruction_pop_reg( io_generated_code, aux_gpr );
  libxsmm_x86_instruction_pop_reg( io_generated_code, scratch_gpr );
}
LIBXSMM_API_INTERN
/* Initializes the 2D accumulator register block with the column bias:
 * for each m-chunk the bias vector is loaded into the n==0 accumulator
 * (BF16 is widened to FP32 via sign-extend + 16-bit left shift), and
 * every other n-column of that m-chunk receives a register copy of it.
 * gp_reg_help_2 is saved, repurposed to hold the bias pointer from the
 * stack frame, and restored. Only BF16 and F32 bias are supported. */
void libxsmm_generator_gemm_load_colbias_to_2D_block( libxsmm_generated_code* io_generated_code,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    libxsmm_datatype colbias_precision,
    const unsigned int l_vec_reg_acc_start,
    const unsigned int l_m_blocking,
    const unsigned int i_n_blocking ) {
  unsigned int l_n = 0, l_m = 0;
  libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
  libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_2 );
  for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
    for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
      if (colbias_precision == LIBXSMM_DATATYPE_BF16) {
        if (l_n == 0) {
          /* Load bias vector */
          /* load 16 bit values into xmm portion of the register */
          if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
            /* masked load for the last (partial) m-chunk */
            libxsmm_x86_instruction_vec_move( io_generated_code,
                i_micro_kernel_config->instruction_set,
                LIBXSMM_X86_INSTR_VMOVDQU16,
                i_gp_reg_mapping->gp_reg_help_2,
                LIBXSMM_X86_GP_REG_UNDEF, 0,
                (l_m * (i_micro_kernel_config->vector_length)) * 2,
                ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'y' : 'z',
                l_vec_reg_acc_start + l_m, 2, 1, 0 );
          } else {
            libxsmm_x86_instruction_vec_move( io_generated_code,
                i_micro_kernel_config->instruction_set,
                i_micro_kernel_config->c_vmove_instruction,
                i_gp_reg_mapping->gp_reg_help_2,
                LIBXSMM_X86_GP_REG_UNDEF, 0,
                (l_m * (i_micro_kernel_config->vector_length)) * 2,
                ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'x' : 'y',
                l_vec_reg_acc_start + l_m, 0, 1, 0 );
          }
          /* convert 16 bit values into 32 bit (integer convert) */
          libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
              LIBXSMM_X86_INSTR_VPMOVSXWD,
              i_micro_kernel_config->vector_name,
              l_vec_reg_acc_start + l_m, l_vec_reg_acc_start + l_m );
          /* shift 16 bits to the left to generate valid FP32 numbers */
          libxsmm_x86_instruction_vec_compute_2reg_imm8(io_generated_code,
              LIBXSMM_X86_INSTR_VPSLLD_I,
              i_micro_kernel_config->vector_name,
              l_vec_reg_acc_start + l_m,
              l_vec_reg_acc_start + l_m,
              16);
        } else {
          /* n > 0: register-copy the already widened bias from the n==0
           * accumulator of this m-chunk */
          libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, LIBXSMM_X86_INSTR_VMOVUPS,
              ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'y' : 'z',
              l_vec_reg_acc_start + l_m, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
        }
      } else if (colbias_precision == LIBXSMM_DATATYPE_F32) {
        if (l_n == 0) {
          /* Load bias vector (masked for the last m-chunk if needed) */
          libxsmm_x86_instruction_vec_move( io_generated_code,
              i_micro_kernel_config->instruction_set,
              i_micro_kernel_config->c_vmove_instruction,
              i_gp_reg_mapping->gp_reg_help_2,
              LIBXSMM_X86_GP_REG_UNDEF, 0,
              ((l_m * (i_micro_kernel_config->vector_length))) * 4,
              i_micro_kernel_config->vector_name,
              l_vec_reg_acc_start + l_m, ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 1, 0 );
        } else {
          /* n > 0: register-copy from the n==0 accumulator */
          libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, LIBXSMM_X86_INSTR_VMOVUPS, i_micro_kernel_config->vector_name, l_vec_reg_acc_start + l_m, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
        }
      } else {
        /* shouldn't happen: only BF16/F32 column bias is supported */
        LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_UNSUP_DATATYPE );
        return;
      }
    }
  }
  libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
}
LIBXSMM_API_INTERN
/* Adds the column bias to an already computed 2D accumulator register
 * block: for each m-chunk the bias vector is loaded into vreg 0 (BF16 is
 * widened to FP32 via sign-extend + 16-bit left shift) and VADDPS'ed
 * into every n-column accumulator of that chunk. gp_reg_help_2 is saved,
 * repurposed to hold the bias pointer from the stack frame, and
 * restored. Only BF16 and F32 bias are supported. Clobbers vreg 0. */
void libxsmm_generator_gemm_add_colbias_to_2D_block( libxsmm_generated_code* io_generated_code,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    libxsmm_datatype colbias_precision,
    const unsigned int l_vec_reg_acc_start,
    const unsigned int l_m_blocking,
    const unsigned int i_n_blocking ) {
  unsigned int l_n = 0, l_m = 0;
  libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
  libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_2 );
  for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
    /* Load bias vector */
    if (colbias_precision == LIBXSMM_DATATYPE_BF16) {
      /* load 16 bit values into xmm portion of the register */
      if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
        /* masked load for the last (partial) m-chunk */
        libxsmm_x86_instruction_vec_move( io_generated_code,
            i_micro_kernel_config->instruction_set,
            LIBXSMM_X86_INSTR_VMOVDQU16,
            i_gp_reg_mapping->gp_reg_help_2,
            LIBXSMM_X86_GP_REG_UNDEF, 0,
            (l_m * (i_micro_kernel_config->vector_length)) * 2,
            ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'y' : 'z',
            0, 2, 1, 0 );
      } else {
        libxsmm_x86_instruction_vec_move( io_generated_code,
            i_micro_kernel_config->instruction_set,
            i_micro_kernel_config->c_vmove_instruction,
            i_gp_reg_mapping->gp_reg_help_2,
            LIBXSMM_X86_GP_REG_UNDEF, 0,
            (l_m * (i_micro_kernel_config->vector_length)) * 2,
            ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'x' : 'y',
            0, 0, 1, 0 );
      }
      /* convert 16 bit values into 32 bit (integer convert) */
      libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
          LIBXSMM_X86_INSTR_VPMOVSXWD,
          i_micro_kernel_config->vector_name,
          0, 0 );
      /* shift 16 bits to the left to generate valid FP32 numbers */
      libxsmm_x86_instruction_vec_compute_2reg_imm8(io_generated_code,
          LIBXSMM_X86_INSTR_VPSLLD_I,
          i_micro_kernel_config->vector_name,
          0,
          0,
          16);
    } else if (colbias_precision == LIBXSMM_DATATYPE_F32) {
      /* FP32 bias: direct (possibly masked) load into vreg 0 */
      libxsmm_x86_instruction_vec_move( io_generated_code,
          i_micro_kernel_config->instruction_set,
          i_micro_kernel_config->c_vmove_instruction,
          i_gp_reg_mapping->gp_reg_help_2,
          LIBXSMM_X86_GP_REG_UNDEF, 0,
          ((l_m * (i_micro_kernel_config->vector_length))) * 4,
          i_micro_kernel_config->vector_name,
          0, ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 1, 0 );
    } else {
      /* shouldn't happen: only BF16/F32 column bias is supported */
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_UNSUP_DATATYPE );
      return;
    }
    /* Add colbias to every n-column accumulator of this m-chunk */
    for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
      libxsmm_x86_instruction_vec_compute_3reg( io_generated_code, LIBXSMM_X86_INSTR_VADDPS, i_micro_kernel_config->vector_name,
          l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), 0, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
    }
  }
  libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
}
LIBXSMM_API_INTERN
/* Allocates the working/coefficient vector registers for the rational
 * 7/8 Pade sigmoid approximation from the TOP of the available register
 * file (reserved_zmms - 1 downward), materializes the scalar constants
 * in the GEMM scratch area, and broadcasts each one into its dedicated
 * vreg. Two mask registers are likewise taken from the top of the
 * available mask registers. temp_reg is clobbered. */
void libxsmm_generator_gemm_prepare_coeffs_sigmoid_ps_rational_78_avx_avx512( libxsmm_generated_code* io_generated_code,
    libxsmm_micro_kernel_config* i_micro_kernel_config,
    unsigned int reserved_zmms,
    unsigned int reserved_mask_regs,
    unsigned int temp_reg ) {
  /* numerator/denominator polynomial coefficients, clip bounds
   * (+/-4.97), +/-1 and 0.5, zero-padded to one full 16-float (64-byte)
   * vector */
  float pade78_sigm_array[16] = { 2027025.0f, 270270.0f, 6930.0f, 36.0f, 945945.0f, 51975.0f, 630.0f, 4.97f, -4.97f, 1.0f, -1.0f, 0.5f, 0.0f, 0.0f, 0.0f, 0.0f };
  /* scratch registers for the sigmoid evaluation */
  i_micro_kernel_config->vec_x2 = reserved_zmms - 1;
  i_micro_kernel_config->vec_nom = reserved_zmms - 2;
  i_micro_kernel_config->vec_denom = reserved_zmms - 3;
  /* coefficient registers, one scalar each */
  i_micro_kernel_config->vec_c0 = reserved_zmms - 4;
  i_micro_kernel_config->vec_c1 = reserved_zmms - 5;
  i_micro_kernel_config->vec_c2 = reserved_zmms - 6;
  i_micro_kernel_config->vec_c3 = reserved_zmms - 7;
  i_micro_kernel_config->vec_c1_d = reserved_zmms - 8;
  i_micro_kernel_config->vec_c2_d = reserved_zmms - 9;
  i_micro_kernel_config->vec_c3_d = reserved_zmms - 10;
  i_micro_kernel_config->vec_hi_bound = reserved_zmms - 11;
  i_micro_kernel_config->vec_lo_bound = reserved_zmms - 12;
  i_micro_kernel_config->vec_ones = reserved_zmms - 13;
  i_micro_kernel_config->vec_neg_ones = reserved_zmms - 14;
  i_micro_kernel_config->vec_halves = reserved_zmms - 15;
  /* load the constant array into vec_c0 (used here as a temporary) and
   * persist it to the GEMM scratch area so the scalars can be broadcast
   * from memory below */
  libxsmm_x86_instruction_full_vec_load_of_constants ( io_generated_code, (const unsigned char *) pade78_sigm_array, "pade78_sigm_array_", i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c0);
  libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_GEMM_SCRATCH_PTR, temp_reg );
  libxsmm_x86_instruction_vec_move( io_generated_code,
      i_micro_kernel_config->instruction_set,
      LIBXSMM_X86_INSTR_VMOVUPS,
      temp_reg,
      LIBXSMM_X86_GP_REG_UNDEF, 0, 0,
      i_micro_kernel_config->vector_name,
      i_micro_kernel_config->vec_c0, 0, 1, 1 );
  if (io_generated_code->arch < LIBXSMM_X86_AVX512) {
    /* pre-AVX512 vectors hold only 8 floats (32 bytes), so the upper
     * half of the array is loaded/stored separately at byte offset 32 */
    libxsmm_x86_instruction_full_vec_load_of_constants ( io_generated_code, (const unsigned char *) &pade78_sigm_array[8], "pade78_sigm_array2_", i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c0);
    libxsmm_x86_instruction_vec_move( io_generated_code,
        i_micro_kernel_config->instruction_set,
        LIBXSMM_X86_INSTR_VMOVUPS,
        temp_reg,
        LIBXSMM_X86_GP_REG_UNDEF, 0, 32,
        i_micro_kernel_config->vector_name,
        i_micro_kernel_config->vec_c0, 0, 1, 1 );
  }
  /* broadcast each scalar from scratch into its register; byte offsets
   * map to array indices 0..11 (offset/4): c0..c3, c1_d..c3_d,
   * hi/lo bound, +/-1, 0.5 */
  libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0,
      0, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c0, 0, 1, 0 );
  libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0,
      4, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c1, 0, 1, 0 );
  libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0,
      8, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c2, 0, 1, 0 );
  libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0,
      12, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c3, 0, 1, 0 );
  libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0,
      16, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c1_d, 0, 1, 0 );
  libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0,
      20, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c2_d, 0, 1, 0 );
  libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0,
      24, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c3_d, 0, 1, 0 );
  libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0,
      28, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_hi_bound, 0, 1, 0 );
  libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0,
      32, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_lo_bound, 0, 1, 0 );
  libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0,
      36, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_ones, 0, 1, 0 );
  libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0,
      40, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_neg_ones, 0, 1, 0 );
  if (io_generated_code->arch >= LIBXSMM_X86_AVX512) {
    /* 0.5 is only consumed by the AVX512 sigmoid variant */
    libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0,
        44, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_halves, 0, 1, 0 );
  }
  /* clip masks for the hi/lo bound handling */
  i_micro_kernel_config->mask_hi = reserved_mask_regs - 1;
  i_micro_kernel_config->mask_lo = reserved_mask_regs - 2;
}
LIBXSMM_API_INTERN
/* Populates the kernel's stack-frame variables from the extended
 * parameter structure whose base address is held in gp_reg_help_1:
 * scale factor, prefetch pointers, BRGEMM offsets and the fused-eltwise
 * pointers are read at fixed byte offsets (presumably the layout of the
 * extended GEMM parameter struct -- confirm offsets 40/48/56/72/88/104/
 * 112/128/160/192 against that struct's definition) and stashed into
 * their stack slots. Clobbers R10 as a temporary. */
void libxsmm_generator_gemm_setup_stack_frame_fill_stack_vars_v2( libxsmm_generated_code* io_generated_code,
    const libxsmm_gemm_descriptor* i_xgemm_desc,
    libxsmm_micro_kernel_config* i_micro_kernel_config,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping ) {
  /* classify the kernel variant from the descriptor flags */
  int is_stride_brgemm = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_STRIDE) > 0) ? 1 : 0;
  int is_offset_brgemm = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) > 0) ? 1 : 0;
  int is_address_brgemm = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) > 0) ? 1 : 0;
  int is_brgemm = ((is_stride_brgemm == 1) || (is_offset_brgemm == 1) || (is_address_brgemm == 1)) ? 1 : 0;
  /* int8 in/out kernels carry a scale factor */
  int has_scf = ((LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) && (LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ))) ? 1 : 0;
  int has_A_pf_ptr = (i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2_AHEAD || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD) ? 1 : 0;
  int has_B_pf_ptr = (i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ||
      i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_PREFETCH_AL2CL2BL2_VIA_C ) ? 1 : 0;
  unsigned int temp_reg = LIBXSMM_X86_GP_REG_R10;
  if (has_scf == 1) {
    /* int8 scale factor pointer */
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
        i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 112, temp_reg, 0 );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_INT8_SCF, temp_reg );
  }
  if (has_A_pf_ptr == 1) {
    /* A prefetch pointer */
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
        i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 56, temp_reg, 0 );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, temp_reg );
  }
  if (has_B_pf_ptr == 1) {
    /* B prefetch pointer */
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
        i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 88, temp_reg, 0 );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
  }
  if ((is_brgemm == 1) && ( i_micro_kernel_config->decompress_A == 1)) {
    /* batch-reduce count, needed when A is decompressed */
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_BRCOUNT, i_gp_reg_mapping->gp_reg_reduce_count );
  }
  if (is_offset_brgemm == 1) {
    /* A/B offset arrays for offset-based BRGEMM */
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
        i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 40, temp_reg, 0 );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_A_OFFS_BRGEMM_PTR, temp_reg );
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
        i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 72, temp_reg, 0 );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_B_OFFS_BRGEMM_PTR, temp_reg );
  }
  if (i_micro_kernel_config->fused_eltwise == 1) {
    if (i_micro_kernel_config->has_colbias_act_fused == 1) {
      /* column-bias pointer and eltwise output pointer */
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
          i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 128, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, temp_reg );
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
          i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 104, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, temp_reg );
    }
    if (i_micro_kernel_config->decompress_A == 1) {
      /* sparsity bitmap and decompression scratch buffer */
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
          i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 48, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BITMAP_PTR, temp_reg );
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
          i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 160, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_DECOMPRESS_BUF, temp_reg );
    }
    if (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) {
      /* external buffer for the VNNI-converted output */
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
          i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 104, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, temp_reg );
    }
    if (i_micro_kernel_config->fused_relu_bwd == 1) {
      /* ReLU-backward bitmask pointer */
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
          i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 104, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR, temp_reg );
    }
    if (i_micro_kernel_config->norm_to_normT_B_ext_buf == 1) {
      /* external buffer for the transposed copy of B */
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
          i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 192, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_TRANS_EXT_BUF_B, temp_reg );
    }
  }
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_setup_stack_frame_fill_stack_vars(libxsmm_generated_code* io_generated_code,
    const libxsmm_gemm_descriptor* i_xgemm_desc,
    libxsmm_micro_kernel_config* i_micro_kernel_config) {
  /* Spills selected kernel arguments (prefetch pointers, BRGEMM offset arrays, int8 scaling
   * factor, eltwise-struct fields) from their incoming locations into the RBP-relative stack
   * slots prepared by libxsmm_generator_gemm_setup_stack_frame.
   *
   * Which physical register carries which argument depends on the GEMM flavor: A, B and C
   * occupy the first three integer-argument registers, so the remaining (optional) arguments
   * arrive in RCX, R8, R9 and then on the caller's stack (read back via ARG_7..ARG_10).
   * The register progression below (RCX -> R8 -> R9 -> stack) matches the SysV x86-64
   * integer-argument order (rdi, rsi, rdx, rcx, r8, r9) -- assumed calling convention,
   * consistent with the ARG_7+ stack reads. */
  int is_stride_brgemm = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_STRIDE) > 0) ? 1 : 0;
  int is_offset_brgemm = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) > 0) ? 1 : 0;
  int is_address_brgemm = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) > 0) ? 1 : 0;
  int is_brgemm = ((is_stride_brgemm == 1) || (is_offset_brgemm == 1) || (is_address_brgemm == 1)) ? 1 : 0;
  /* int8 inputs with int8 outputs carry an extra scaling-factor argument */
  int has_scf = ((LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) && (LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ))) ? 1 : 0;
  int has_A_pf_ptr = (i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2_AHEAD || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD) ? 1 : 0;
  int has_B_pf_ptr = (i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ||
      i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_PREFETCH_AL2CL2BL2_VIA_C ) ? 1 : 0;
  /* default: eltwise struct pointer lands in R11 unless a register branch below re-assigns it */
  unsigned int eltwise_struct_ptr_reg = LIBXSMM_X86_GP_REG_R11;
  unsigned int temp_reg = LIBXSMM_X86_GP_REG_R10;

  if (is_brgemm == 0) {
    /* GEMM (A, B, C, [scf, eltwise_struct, Apf, Bpf] */
    if (has_scf == 1) {
      /* scaling factor is the 4th argument -> RCX; later optionals shift to R8/R9/stack */
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_INT8_SCF, LIBXSMM_X86_GP_REG_RCX );
      if (i_micro_kernel_config->fused_eltwise == 1) {
        eltwise_struct_ptr_reg = LIBXSMM_X86_GP_REG_R8;
        if (has_A_pf_ptr == 1) {
          libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_R9 );
          if (has_B_pf_ptr == 1) {
            /* all register slots exhausted: B prefetch pointer comes from the caller's stack */
            libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg );
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
          }
        } else {
          if (has_B_pf_ptr == 1) {
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R9 );
          }
        }
      } else {
        if (has_A_pf_ptr == 1) {
          libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_R8 );
          if (has_B_pf_ptr == 1) {
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R9 );
          }
        } else {
          if (has_B_pf_ptr == 1) {
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R8 );
          }
        }
      }
    } else {
      /* no scaling factor: optionals start one register earlier (RCX) */
      if (i_micro_kernel_config->fused_eltwise == 1) {
        eltwise_struct_ptr_reg = LIBXSMM_X86_GP_REG_RCX;
        if (has_A_pf_ptr == 1) {
          libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_R8 );
          if (has_B_pf_ptr == 1) {
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R9 );
          }
        } else {
          if (has_B_pf_ptr == 1) {
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R8 );
          }
        }
      } else {
        if (has_A_pf_ptr == 1) {
          libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_RCX );
          if (has_B_pf_ptr == 1) {
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R8 );
          }
        } else {
          if (has_B_pf_ptr == 1) {
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_RCX );
          }
        }
      }
    }
  } else {
    if (i_micro_kernel_config->decompress_A == 1) {
      /* with A decompression the batch-reduce count is dereferenced from the pointer in RCX
       * (offset 0) and stashed on the stack frame */
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RCX, LIBXSMM_X86_GP_REG_UNDEF, 0, 0, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_BRCOUNT, temp_reg );
    }
    if ((is_stride_brgemm == 1) || (is_address_brgemm == 1)) {
      /* BRGEMM_ADDR/STRIDE (A, B, C, cnt, [scf, eltwise_struct, Apf, Bpf] */
      /* the count consumes RCX, so optionals start at R8 */
      if (has_scf == 1) {
        libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_INT8_SCF, LIBXSMM_X86_GP_REG_R8 );
        if (i_micro_kernel_config->fused_eltwise == 1) {
          eltwise_struct_ptr_reg = LIBXSMM_X86_GP_REG_R9;
          if (has_A_pf_ptr == 1) {
            libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg );
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, temp_reg );
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, temp_reg );
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
            }
          } else {
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg );
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
            }
          }
        } else {
          if (has_A_pf_ptr == 1) {
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_R9 );
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg );
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
            }
          } else {
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R9 );
            }
          }
        }
      } else {
        if (i_micro_kernel_config->fused_eltwise == 1) {
          eltwise_struct_ptr_reg = LIBXSMM_X86_GP_REG_R8;
          if (has_A_pf_ptr == 1) {
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_R9 );
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg );
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
            }
          } else {
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R9 );
            }
          }
        } else {
          if (has_A_pf_ptr == 1) {
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_R8 );
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R9 );
            }
          } else {
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R8 );
            }
          }
        }
      }
    } else {
      /* BRGEMM_OFFS (A, B, C, cnt, A_off, B_off, [scf, eltwise struct, Apf, Bpf] */
      /* offset arrays take R8/R9, so every remaining optional is read from the caller's stack */
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_A_OFFS_BRGEMM_PTR, LIBXSMM_X86_GP_REG_R8 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_B_OFFS_BRGEMM_PTR, LIBXSMM_X86_GP_REG_R9 );
      if (has_scf == 1) {
        libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg );
        libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_INT8_SCF, temp_reg );
        if (i_micro_kernel_config->fused_eltwise == 1) {
          libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, eltwise_struct_ptr_reg );
          if (has_A_pf_ptr == 1) {
            libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_9, temp_reg );
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, temp_reg );
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_10, temp_reg );
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
            }
          } else {
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_9, temp_reg );
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
            }
          }
        } else {
          if (has_A_pf_ptr == 1) {
            libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, temp_reg );
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, temp_reg );
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_9, temp_reg );
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
            }
          } else {
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, temp_reg );
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
            }
          }
        }
      } else {
        if (i_micro_kernel_config->fused_eltwise == 1) {
          libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, eltwise_struct_ptr_reg );
          if (has_A_pf_ptr == 1) {
            libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, temp_reg );
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, temp_reg );
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_9, temp_reg );
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
            }
          } else {
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, temp_reg );
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
            }
          }
        } else {
          if (has_A_pf_ptr == 1) {
            libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg );
            libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, temp_reg );
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, temp_reg );
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
            }
          } else {
            if (has_B_pf_ptr == 1) {
              libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg );
              libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg );
            }
          }
        }
      }
    }
  }
  if (i_micro_kernel_config->fused_eltwise == 1) {
    /* dereference the eltwise struct and copy the fields needed for this fusion into the
     * stack frame; offsets (0, 8, 16, 24, 32) address 8-byte struct members -- struct layout
     * presumably matches the eltwise parameter struct declared elsewhere, TODO confirm */
    if (i_micro_kernel_config->has_colbias_act_fused == 1) {
      /* TODO: Optimize this copy to operate only in used fileds form struct... */
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 0, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, temp_reg );
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 8, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, temp_reg );
    }
    if (i_micro_kernel_config->decompress_A == 1) {
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 16, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BITMAP_PTR, temp_reg );
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 24, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_DECOMPRESS_BUF, temp_reg );
    }
    if (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) {
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 8, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, temp_reg );
    }
    if (i_micro_kernel_config->fused_relu_bwd == 1) {
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 32, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR, temp_reg );
    }
    if (i_micro_kernel_config->norm_to_normT_B_ext_buf == 1) {
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 16, temp_reg, 0 );
      libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_TRANS_EXT_BUF_B, temp_reg );
    }
  }
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_setup_stack_frame_allocate_scratch( libxsmm_generated_code* io_generated_code,
    const libxsmm_gemm_descriptor* i_xgemm_desc,
    libxsmm_micro_kernel_config* i_micro_kernel_config ) {
  /* Reserves GEMM scratch space on the (already 64-byte aligned) stack and records its
   * address in the GEMM_SCRATCH_PTR slot. The required size depends on whether AMX is
   * emulated (EMULATE_AMX environment variable), the target architecture, and the ABI. */
  unsigned int l_scratch_bytes = 0;
  int l_amx_emulation = 0;
  const char *const l_env_emu_amx = getenv("EMULATE_AMX");

  if ( NULL != l_env_emu_amx ) {
    l_amx_emulation = atoi(l_env_emu_amx);
  }

  if (l_amx_emulation > 0) {
    /* emulation path: C-sized spill area (doubled for single-tile configs) plus
     * 8 tiles of 32x32 plus room to stash 32 zmm registers */
    const int l_expand_factor = (i_micro_kernel_config->n_tiles == 1) ? 2 : 1;
    i_micro_kernel_config->emulation_scratch_offset = l_expand_factor * i_xgemm_desc->n * i_xgemm_desc->ldc * 4 /*i_micro_kernel_config->datatype_size*/;
    l_scratch_bytes = l_expand_factor * i_xgemm_desc->n * i_xgemm_desc->ldc * 4 /*i_micro_kernel_config->datatype_size*/ + 8 * 32 * 32 + 32 * 64 ;
    if (LIBXSMM_DATATYPE_F32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype )) {
      /* F32 output needs no separate emulation spill area */
      i_micro_kernel_config->emulation_scratch_offset = 0;
      l_scratch_bytes = 8 * 32 * 32 + 32 * 64 ;
    }
  } else {
    if ((io_generated_code->arch >= LIBXSMM_X86_AVX512_SPR)) {
      const int l_expand_factor = (i_micro_kernel_config->n_tiles == 1) ? 2 : 1;
      l_scratch_bytes = LIBXSMM_MAX(32*64, l_expand_factor * i_xgemm_desc->n * i_xgemm_desc->ldc * 4/*i_micro_kernel_config->datatype_size*/);
    } else {
      /* Allocate scratch for stashing 32 zmms */
      if ( ((LIBXSMM_GEMM_FLAG_USE_XGEMM_EXT_ABI & i_xgemm_desc->flags) == LIBXSMM_GEMM_FLAG_USE_XGEMM_EXT_ABI) ) {
        l_scratch_bytes = 32 * 64;
      }
    }
  }

  /* round the scratch size up to the next multiple of 64 bytes */
  l_scratch_bytes += (64 - (l_scratch_bytes % 64)) % 64;

  if (l_scratch_bytes > 0) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, LIBXSMM_X86_GP_REG_RSP, l_scratch_bytes );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_GEMM_SCRATCH_PTR, LIBXSMM_X86_GP_REG_RSP );
  }
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_setup_stack_frame( libxsmm_generated_code* io_generated_code,
    const libxsmm_gemm_descriptor* i_xgemm_desc,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    libxsmm_micro_kernel_config* i_micro_kernel_config ) {
  /* Builds the kernel's RBP-based stack frame: saves RBP, reserves 88 bytes of slots,
   * fills them from the incoming arguments, aligns RSP to 64 bytes and finally carves out
   * the GEMM scratch area. The slot layout is documented below and must stay in sync with
   * libxsmm_generator_gemm_get_rbp_relative_offset. */
  const unsigned int l_mask_reg = LIBXSMM_X86_GP_REG_R10;
  const int l_new_abi =
      ( ((LIBXSMM_GEMM_FLAG_USE_XGEMM_ABI & i_xgemm_desc->flags) == LIBXSMM_GEMM_FLAG_USE_XGEMM_ABI) ||
        ((LIBXSMM_GEMM_FLAG_USE_XGEMM_EXT_ABI & i_xgemm_desc->flags) == LIBXSMM_GEMM_FLAG_USE_XGEMM_EXT_ABI) ) ? 1 : 0;

  /* establish frame pointer and reserve the fixed stack-var area (11 x 8 bytes) */
  libxsmm_x86_instruction_push_reg( io_generated_code, LIBXSMM_X86_GP_REG_RBP );
  libxsmm_x86_instruction_alu_reg( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_RBP);
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, LIBXSMM_X86_GP_REG_RSP, 88 );

  /* populate the slots; the v2 filler handles the (EXT_)XGEMM ABIs */
  if ( l_new_abi != 0 ) {
    libxsmm_generator_gemm_setup_stack_frame_fill_stack_vars_v2( io_generated_code, i_xgemm_desc, i_micro_kernel_config, i_gp_reg_mapping );
  } else {
    libxsmm_generator_gemm_setup_stack_frame_fill_stack_vars(io_generated_code, i_xgemm_desc, i_micro_kernel_config);
  }

  /* Stack layout at this point (offsets relative to RBP):
   *   +80  10th param (if applicable)
   *   +72   9th param (if applicable)
   *   +64   8th param (if applicable)
   *   +56   7th param (if applicable)
   *   +48  return address
   *   +8..+40  callee-saved registers
   *    0   entry/saved RBP
   *   -8   prefetch A ptr
   *  -16   prefetch B ptr
   *  -24   offset-A array ptr (BRGEMM)
   *  -32   offset-B array ptr (BRGEMM)
   *  -40   int8 scaling factor
   *  -48   GEMM scratch ptr (filled below)
   *  -56   eltwise bias ptr
   *  -64   eltwise output ptr
   *  -72   eltwise buf1 ptr
   *  -80   eltwise buf2 ptr
   *  -88   batch-reduce count (== RSP here)
   */

  /* align RSP down to a 64-byte boundary: rsp &= ~63 via a mask staged in a GPR */
  libxsmm_x86_instruction_alu_imm_i64( io_generated_code, i_micro_kernel_config->alu_mov_instruction, l_mask_reg, 0xFFFFFFFFFFFFFFC0 );
  libxsmm_x86_instruction_alu_reg( io_generated_code, LIBXSMM_X86_INSTR_ANDQ, l_mask_reg, LIBXSMM_X86_GP_REG_RSP);

  /* reserve the (64-byte aligned) GEMM scratch area if the kernel needs one */
  libxsmm_generator_gemm_setup_stack_frame_allocate_scratch( io_generated_code, i_xgemm_desc, i_micro_kernel_config );

  /* On exit the layout above still holds, followed (below RBP-88) by an optional
   * alignment pad and the 64-byte aligned GEMM scratch whose address is stored at RBP-48. */
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_destroy_stack_frame( libxsmm_generated_code* io_generated_code,
    const libxsmm_gemm_descriptor* i_xgemm_desc,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config ) {
  /* Tears down the frame created by libxsmm_generator_gemm_setup_stack_frame:
   * restoring RSP from RBP discards all stack vars and scratch in one step,
   * then the caller's RBP is popped back. */
  LIBXSMM_UNUSED(i_gp_reg_mapping);
  LIBXSMM_UNUSED(i_xgemm_desc);
  libxsmm_x86_instruction_alu_reg( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RBP, LIBXSMM_X86_GP_REG_RSP);
  libxsmm_x86_instruction_pop_reg( io_generated_code, LIBXSMM_X86_GP_REG_RBP );
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_setup_fusion_microkernel_properties_v2(const libxsmm_gemm_descriptor* i_xgemm_desc,
    libxsmm_micro_kernel_config* i_micro_kernel_config ) {
  /* Derives all fusion-related micro-kernel properties from the descriptor's
   * eltwise fields (new EXT-ABI encoding). Starts from "no fusion" defaults and
   * only enables features when the EXT ABI is requested. */
  i_micro_kernel_config->fused_bcolbias = 0;
  i_micro_kernel_config->fused_scolbias = 0;
  i_micro_kernel_config->fused_relu = 0;
  i_micro_kernel_config->fused_relu_nobitmask = 0;
  i_micro_kernel_config->fused_relu_bwd = 0;
  i_micro_kernel_config->fused_sigmoid = 0;
  i_micro_kernel_config->overwrite_C = 1;
  i_micro_kernel_config->vnni_format_C = 0;
  i_micro_kernel_config->decompress_A = 0;
  i_micro_kernel_config->sparsity_factor_A = 1;
  i_micro_kernel_config->vnni_cvt_output_ext_buf = 0;
  i_micro_kernel_config->norm_to_normT_B_ext_buf = 0;
  i_micro_kernel_config->stride_b_trans = 0;
  i_micro_kernel_config->fused_eltwise = 0;
  i_micro_kernel_config->has_colbias_act_fused = 0;

  if ( (LIBXSMM_GEMM_FLAG_USE_XGEMM_EXT_ABI & i_xgemm_desc->flags) == LIBXSMM_GEMM_FLAG_USE_XGEMM_EXT_ABI ) {
    /* bit 0x4 of internal_flags_2 requests "do not overwrite C" */
    i_micro_kernel_config->overwrite_C = ((i_xgemm_desc->internal_flags_2 & 0x4) > 0) ? 0 : 1;

    /* unary op applied to C (eltw_cp_*): the param values are mutually exclusive */
    if (i_xgemm_desc->eltw_cp_op == LIBXSMM_MELTW_OPERATION_UNARY) {
      if (i_xgemm_desc->eltw_cp_param == LIBXSMM_MELTW_TYPE_UNARY_RELU) {
        i_micro_kernel_config->has_colbias_act_fused = 1;
        if ((i_xgemm_desc->eltw_cp_flags & LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT) > 0) {
          i_micro_kernel_config->fused_relu = 1;
        } else {
          i_micro_kernel_config->fused_relu_nobitmask = 1;
        }
      } else if (i_xgemm_desc->eltw_cp_param == LIBXSMM_MELTW_TYPE_UNARY_SIGMOID) {
        i_micro_kernel_config->has_colbias_act_fused = 1;
        i_micro_kernel_config->fused_sigmoid = 1;
      } else if (i_xgemm_desc->eltw_cp_param == LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI) {
        i_micro_kernel_config->vnni_format_C = 1;
        /* when C is preserved, the VNNI-converted result goes to an external buffer */
        if (i_micro_kernel_config->overwrite_C == 0) {
          i_micro_kernel_config->vnni_cvt_output_ext_buf = 1;
        }
      } else if (i_xgemm_desc->eltw_cp_param == LIBXSMM_MELTW_TYPE_UNARY_RELU_INV) {
        i_micro_kernel_config->has_colbias_act_fused = 1;
        i_micro_kernel_config->fused_relu_bwd = 1;
      }
    }

    /* binary add with column broadcast == fused column bias (bf16 or f32 bias) */
    if (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_BINARY) {
      if (i_xgemm_desc->meltw_param == LIBXSMM_MELTW_TYPE_BINARY_ADD) {
        if (((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_0) > 0 ) ||
            ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_1) > 0 )) {
          i_micro_kernel_config->has_colbias_act_fused = 1;
          if (i_xgemm_desc->meltw_datatype_aux == LIBXSMM_DATATYPE_BF16) {
            i_micro_kernel_config->fused_bcolbias = 1;
          } else if (i_xgemm_desc->meltw_datatype_aux == LIBXSMM_DATATYPE_F32) {
            i_micro_kernel_config->fused_scolbias = 1;
          }
        }
      }
    }

    /* unary op applied to A (eltw_ap_*): sparse decompression with a given sparsity factor;
     * only honored when bit 0x1 of internal_flags_2 is set */
    if ((i_xgemm_desc->eltw_ap_op == LIBXSMM_MELTW_OPERATION_UNARY) && ((i_xgemm_desc->internal_flags_2 & 0x1) > 0)) {
      switch (i_xgemm_desc->eltw_ap_param) {
        case LIBXSMM_MELTW_TYPE_UNARY_DECOMPRESS_SPARSE_FACTOR_1:
          i_micro_kernel_config->decompress_A = 1;
          i_micro_kernel_config->sparsity_factor_A = 1;
          break;
        case LIBXSMM_MELTW_TYPE_UNARY_DECOMPRESS_SPARSE_FACTOR_2:
          i_micro_kernel_config->decompress_A = 1;
          i_micro_kernel_config->sparsity_factor_A = 2;
          break;
        case LIBXSMM_MELTW_TYPE_UNARY_DECOMPRESS_SPARSE_FACTOR_4:
          i_micro_kernel_config->decompress_A = 1;
          i_micro_kernel_config->sparsity_factor_A = 4;
          break;
        case LIBXSMM_MELTW_TYPE_UNARY_DECOMPRESS_SPARSE_FACTOR_8:
          i_micro_kernel_config->decompress_A = 1;
          i_micro_kernel_config->sparsity_factor_A = 8;
          break;
        case LIBXSMM_MELTW_TYPE_UNARY_DECOMPRESS_SPARSE_FACTOR_16:
          i_micro_kernel_config->decompress_A = 1;
          i_micro_kernel_config->sparsity_factor_A = 16;
          break;
        case LIBXSMM_MELTW_TYPE_UNARY_DECOMPRESS_SPARSE_FACTOR_32:
          i_micro_kernel_config->decompress_A = 1;
          i_micro_kernel_config->sparsity_factor_A = 32;
          break;
        default:
          break;
      }
    }

    /* unary op applied to B (eltw_bp_*): norm->normT transpose into an external buffer;
     * only honored when bit 0x2 of internal_flags_2 is set */
    if ((i_xgemm_desc->eltw_bp_op == LIBXSMM_MELTW_OPERATION_UNARY) &&
        (i_xgemm_desc->eltw_bp_param == LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_NORMT) &&
        ((i_xgemm_desc->internal_flags_2 & 0x2) > 0)) {
      i_micro_kernel_config->norm_to_normT_B_ext_buf = 1;
      i_micro_kernel_config->stride_b_trans = i_xgemm_desc->ldbp;
    }

    /* any enabled fusion feature implies the fused-eltwise code path */
    i_micro_kernel_config->fused_eltwise =
        ( (i_micro_kernel_config->has_colbias_act_fused == 1) ||
          (i_micro_kernel_config->decompress_A == 1) ||
          (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) ||
          (i_micro_kernel_config->norm_to_normT_B_ext_buf == 1) ||
          (i_micro_kernel_config->fused_relu_bwd == 1) ) ? 1 : 0;
  }
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_setup_fusion_microkernel_properties(const libxsmm_gemm_descriptor* i_xgemm_desc,
    libxsmm_micro_kernel_config* i_micro_kernel_config ) {
  /* Derives all fusion-related micro-kernel properties from the descriptor's legacy
   * meltw_* encoding (pre-EXT-ABI). Starts from "no fusion" defaults, then enables
   * features based on meltw_operation/meltw_flags combinations. Note that several
   * meltw_operation values are checked by more than one block below, so multiple
   * flags can be set for one operation. */
  i_micro_kernel_config->fused_bcolbias = 0;
  i_micro_kernel_config->fused_scolbias = 0;
  i_micro_kernel_config->fused_relu = 0;
  i_micro_kernel_config->fused_relu_nobitmask = 0;
  i_micro_kernel_config->fused_relu_bwd = 0;
  i_micro_kernel_config->fused_sigmoid = 0;
  i_micro_kernel_config->overwrite_C = 0;
  i_micro_kernel_config->vnni_format_C = 0;
  i_micro_kernel_config->decompress_A = 0;
  i_micro_kernel_config->sparsity_factor_A = 1;
  i_micro_kernel_config->vnni_cvt_output_ext_buf = 0;
  i_micro_kernel_config->norm_to_normT_B_ext_buf = 0;
  i_micro_kernel_config->stride_b_trans = 0;
  i_micro_kernel_config->fused_eltwise = 0;
  i_micro_kernel_config->has_colbias_act_fused = 0;
  /* colbias/activation operations: read the individual feature bits from meltw_flags */
  if ((i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_COLBIAS_ACT) ||
      (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_COLBIAS_ACT_DECOMPRESS_A) ||
      (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_ACT_TRANSFORM_C_NORM_TO_VNNI_EXT_BUFFER)) {
    if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_OVERWRITE_C) > 0) {
      i_micro_kernel_config->overwrite_C = 1;
    }
    if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_ACT_RELU) > 0) {
      i_micro_kernel_config->fused_relu = 1;
    }
    if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_ACT_RELU_NOBITMASK) > 0) {
      i_micro_kernel_config->fused_relu_nobitmask = 1;
    }
    if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_ACT_RELU_BWD) > 0) {
      i_micro_kernel_config->fused_relu_bwd = 1;
    }
    if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_ACT_SIGM) > 0) {
      i_micro_kernel_config->fused_sigmoid = 1;
    }
    if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_COLBIAS) > 0) {
      /* bias precision selects the bf16 vs. f32 column-bias path */
      if (i_xgemm_desc->meltw_datatype_aux == LIBXSMM_DATATYPE_BF16) {
        i_micro_kernel_config->fused_bcolbias = 1;
      }
      if (i_xgemm_desc->meltw_datatype_aux == LIBXSMM_DATATYPE_F32) {
        i_micro_kernel_config->fused_scolbias = 1;
      }
    }
  } else {
    /* no colbias/act fusion: C is always overwritten; VNNI-C comes from the GEMM flags */
    i_micro_kernel_config->overwrite_C = 1;
    i_micro_kernel_config->vnni_format_C = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_VNNI_C) > 0) ? 1 : 0;
  }
  /* Determine if we have to decompress A... */
  if ((i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_DECOMPRESS_A) || (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_COLBIAS_ACT_DECOMPRESS_A)) {
    i_micro_kernel_config->decompress_A = 1;
    /* for decompression ops meltw_param carries the sparsity factor directly */
    i_micro_kernel_config->sparsity_factor_A = i_xgemm_desc->meltw_param;
  }
  if ((i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_COLBIAS_ACT) || (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_COLBIAS_ACT_DECOMPRESS_A)) {
    if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_ACT_RELU_BWD) > 0) {
      i_micro_kernel_config->fused_relu_bwd = 1;
    }
    i_micro_kernel_config->has_colbias_act_fused = 1;
    /* a COLBIAS_ACT operation with no flags set is effectively a plain GEMM */
    if (i_xgemm_desc->meltw_flags == (unsigned int)LIBXSMM_MELTW_FLAG_NONE) {
      i_micro_kernel_config->has_colbias_act_fused = 0;
    }
  }
  if ((i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_TRANSFORM_C_NORM_TO_VNNI_EXT_BUFFER) ||
      (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_ACT_TRANSFORM_C_NORM_TO_VNNI_EXT_BUFFER)) {
    i_micro_kernel_config->vnni_cvt_output_ext_buf = 1;
    if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_ACT_RELU_BWD) > 0) {
      i_micro_kernel_config->fused_relu_bwd = 1;
    }
  }
  if ((i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_TRANSFORM_B_NORM_TO_NORMT_EXT_BUFFER) ||
      (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_COLBIAS_ACT_TRANSFORM_B_NORM_TO_NORMT_EXT_BUFFER)) {
    i_micro_kernel_config->norm_to_normT_B_ext_buf = 1;
  }
  /* any enabled fusion feature implies the fused-eltwise code path */
  i_micro_kernel_config->fused_eltwise = (i_micro_kernel_config->has_colbias_act_fused == 1) ? 1: 0;
  if (i_micro_kernel_config->decompress_A == 1) {
    i_micro_kernel_config->fused_eltwise = 1;
  }
  if (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) {
    i_micro_kernel_config->fused_eltwise = 1;
  }
  if (i_micro_kernel_config->norm_to_normT_B_ext_buf == 1) {
    i_micro_kernel_config->fused_eltwise = 1;
    /* the transposed-B buffer uses the eltwise leading dimension as its stride */
    i_micro_kernel_config->stride_b_trans = i_xgemm_desc->meltw_ldy;
  }
  if (i_micro_kernel_config->fused_relu_bwd == 1) {
    i_micro_kernel_config->fused_eltwise = 1;
  }
}
LIBXSMM_API_INTERN
int libxsmm_generator_gemm_get_rbp_relative_offset( libxsmm_gemm_stack_var stack_var ) {
  /* Maps a symbolic stack variable to its RBP-relative byte offset inside the frame
   * built by libxsmm_generator_gemm_setup_stack_frame. Returns 0 for unknown vars
   * (0 is never a valid slot offset, so callers use it as the error sentinel).
   *
   * The stack at exit of setup looks like this (kept in sync with the diagram in
   * libxsmm_generator_gemm_setup_stack_frame; five callee-saved registers sit
   * between the return address and the saved RBP):
   *
   *      10th param (if applicable)                <-- RBP+80
   *      9th param (if applicable)                 <-- RBP+72
   *      8th param (if applicable)                 <-- RBP+64
   *      7th param (if applicable)                 <-- RBP+56
   *      Return address                            <-- RBP+48
   *      Callee SAVED-regs                         <-- RBP[+8,+16,+24,+32,+40]
   *      Entry/saved RBP                           <-- RBP
   *      prefetch A ptr                            <-- RBP-8
   *      prefetch B ptr                            <-- RBP-16
   *      Offset A array ptr                        <-- RBP-24
   *      Offset B array ptr                        <-- RBP-32
   *      Int8 scaling factor                       <-- RBP-40
   *      GEMM_scratch ptr in stack (to be filled)  <-- RBP-48
   *      Eltwise bias ptr                          <-- RBP-56
   *      Eltwise output_ptr                        <-- RBP-64
   *      Eltwise buf1_ptr                          <-- RBP-72
   *      Eltwise buf2_ptr                          <-- RBP-80
   *      Batch-reduce count                        <-- RBP-88
   *
   * Note: the buf1 (-72) and buf2 (-80) slots are aliased by several mutually
   * exclusive fusion variables (RELU bitmask / transpose buffers / decompress
   * bitmap+buffer) -- a kernel only ever uses one meaning per slot. */
  switch ( stack_var ) {
    case LIBXSMM_GEMM_STACK_VAR_NONE:
      return 0;
    case LIBXSMM_GEMM_STACK_VAR_PFA_PTR:
      return -8;
    case LIBXSMM_GEMM_STACK_VAR_PFB_PTR:
      return -16;
    case LIBXSMM_GEMM_STACK_VAR_A_OFFS_BRGEMM_PTR:
      return -24;
    case LIBXSMM_GEMM_STACK_VAR_B_OFFS_BRGEMM_PTR:
      return -32;
    case LIBXSMM_GEMM_STACK_VAR_INT8_SCF:
      return -40;
    case LIBXSMM_GEMM_STACK_VAR_GEMM_SCRATCH_PTR:
      return -48;
    case LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR:
      return -56;
    case LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR:
      return -64;
    case LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR:
      return -72;
    case LIBXSMM_GEMM_STACK_VAR_ELT_BUF1:
      return -72;
    case LIBXSMM_GEMM_STACK_VAR_ELT_BUF2:
      return -80;
    case LIBXSMM_GEMM_STACK_VAR_BRCOUNT:
      return -88;
    case LIBXSMM_GEMM_STACK_VAR_TRANS_EXT_BUF_B:
      return -72;
    case LIBXSMM_GEMM_STACK_VAR_TRANS_EXT_BUF_C:
      return -80;
    case LIBXSMM_GEMM_STACK_VAR_ELT_BITMAP_PTR:
      return -72;
    case LIBXSMM_GEMM_STACK_VAR_ELT_DECOMPRESS_BUF:
      return -80;
    case LIBXSMM_GEMM_STACK_VAR_ARG_7:
      return 56;
    case LIBXSMM_GEMM_STACK_VAR_ARG_8:
      return 64;
    case LIBXSMM_GEMM_STACK_VAR_ARG_9:
      return 72;
    case LIBXSMM_GEMM_STACK_VAR_ARG_10:
      return 80;
    default:
      return 0;
  }
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_getval_stack_var( libxsmm_generated_code* io_generated_code,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    libxsmm_gemm_stack_var stack_var,
    unsigned int i_gp_reg ) {
  /* Emits a load of the requested stack variable (RBP-relative slot) into i_gp_reg.
   * Reading is allowed for both negative slots and positive (caller-argument) slots,
   * so only the zero sentinel -- an unknown stack var -- is rejected. */
  const int l_offset = libxsmm_generator_gemm_get_rbp_relative_offset(stack_var);
  if (0 == l_offset) {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_GENERAL );
    return;
  }
  libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RBP, LIBXSMM_X86_GP_REG_UNDEF, 0, l_offset, i_gp_reg, 0 );
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_setval_stack_var( libxsmm_generated_code* io_generated_code,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    libxsmm_gemm_stack_var stack_var,
    unsigned int i_gp_reg ) {
  /* Emits a store of i_gp_reg into the requested stack variable (RBP-relative slot).
   * Unlike getval, writes are only legal to the kernel's own slots (negative offsets);
   * zero (unknown var) and positive offsets (caller-owned argument area) are rejected. */
  const int l_offset = libxsmm_generator_gemm_get_rbp_relative_offset(stack_var);
  if (l_offset >= 0) {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_GENERAL );
    return;
  }
  libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RBP, LIBXSMM_X86_GP_REG_UNDEF, 0, l_offset, i_gp_reg, 1 );
}
/* ----------------------------------------------------------------------------------- */
/* local helpers for libxsmm_generator_gemm_init_micro_kernel_config_fullvector        */
/* ----------------------------------------------------------------------------------- */

/* Resets the config to the generic/unsupported state: no vector ISA, all instruction
 * slots undefined. Used for out-of-range architecture ids and for datatype
 * combinations that have no full-vector kernel. */
static void libxsmm_generator_gemm_mkc_generic_fallback( libxsmm_micro_kernel_config* io_micro_kernel_config ) {
  io_micro_kernel_config->instruction_set = LIBXSMM_TARGET_ARCH_GENERIC;
  io_micro_kernel_config->vector_reg_count = 0;
  io_micro_kernel_config->use_masking_a_c = 0;
  io_micro_kernel_config->vector_name = 'a';
  io_micro_kernel_config->vector_length = 0;
  io_micro_kernel_config->datatype_size_in = 0;
  io_micro_kernel_config->datatype_size_out = 0;
  io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
  io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
  io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
  io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
  /* explicitly reset the NT move as well; the original code relied on the caller's memset */
  io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_UNDEF;
  io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
  io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
  io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}

/* Selects the regular and the non-temporal (streaming) move instructions for C.
 * When C is aligned the NT slot gets i_nts_instr, except when masking is in use:
 * NT stores have no masked form, so the aligned regular move is used as the NT
 * surrogate (this mirrors the F64 reference branches of the original code).
 * BUGFIX: the original non-F64 branches redundantly overwrote c_vmove_instruction
 * in the masked case and left c_vmove_nts_instruction at its memset value of 0. */
static void libxsmm_generator_gemm_mkc_set_c_moves( libxsmm_micro_kernel_config* io_micro_kernel_config,
                                                    const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                    const unsigned int i_use_masking_a_c,
                                                    const unsigned int i_aligned_instr,
                                                    const unsigned int i_nts_instr,
                                                    const unsigned int i_unaligned_instr ) {
  if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
    io_micro_kernel_config->c_vmove_instruction = i_aligned_instr;
    io_micro_kernel_config->c_vmove_nts_instruction = ( i_use_masking_a_c == 0 ) ? i_nts_instr : i_aligned_instr;
  } else {
    /* unaligned C: no NT store is emitted, the unaligned move serves both slots */
    io_micro_kernel_config->c_vmove_instruction = i_unaligned_instr;
    io_micro_kernel_config->c_vmove_nts_instruction = i_unaligned_instr;
  }
}

/* Datatype-dependent settings shared by the 32-register 256-bit ('y') and the
 * 512-bit ('z') code paths; i_vlen_32b is the vector length counted in 32-bit
 * elements (8 for 256-bit, 16 for 512-bit), F64 uses half of it.
 * Returns 1 on success, 0 for an unsupported datatype combination. */
static int libxsmm_generator_gemm_mkc_set_dtype_avx512( libxsmm_micro_kernel_config* io_micro_kernel_config,
                                                        const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                        const unsigned int i_use_masking_a_c,
                                                        const unsigned int i_vlen_32b ) {
  if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
    io_micro_kernel_config->vector_length = i_vlen_32b / 2;
    io_micro_kernel_config->datatype_size_in = 8;
    io_micro_kernel_config->datatype_size_out = 8;
    io_micro_kernel_config->a_vmove_instruction = ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 )
      ? LIBXSMM_X86_INSTR_VMOVAPD : LIBXSMM_X86_INSTR_VMOVUPD;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    libxsmm_generator_gemm_mkc_set_c_moves( io_micro_kernel_config, i_xgemm_desc, i_use_masking_a_c,
      LIBXSMM_X86_INSTR_VMOVAPD, LIBXSMM_X86_INSTR_VMOVNTPD, LIBXSMM_X86_INSTR_VMOVUPD );
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
  } else if ( LIBXSMM_DATATYPE_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
    io_micro_kernel_config->vector_length = i_vlen_32b;
    io_micro_kernel_config->datatype_size_in = 4;
    io_micro_kernel_config->datatype_size_out = 4;
    io_micro_kernel_config->a_vmove_instruction = ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 )
      ? LIBXSMM_X86_INSTR_VMOVAPS : LIBXSMM_X86_INSTR_VMOVUPS;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    libxsmm_generator_gemm_mkc_set_c_moves( io_micro_kernel_config, i_xgemm_desc, i_use_masking_a_c,
      LIBXSMM_X86_INSTR_VMOVAPS, LIBXSMM_X86_INSTR_VMOVNTPS, LIBXSMM_X86_INSTR_VMOVUPS );
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
  } else if ( LIBXSMM_DATATYPE_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
    /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
    io_micro_kernel_config->vector_length = i_vlen_32b;
    io_micro_kernel_config->datatype_size_in = 4;
    io_micro_kernel_config->datatype_size_out = ( LIBXSMM_DATATYPE_I16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ? 2 : 4;
    io_micro_kernel_config->a_vmove_instruction = ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 )
      ? LIBXSMM_X86_INSTR_VMOVAPS : LIBXSMM_X86_INSTR_VMOVUPS;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    libxsmm_generator_gemm_mkc_set_c_moves( io_micro_kernel_config, i_xgemm_desc, i_use_masking_a_c,
      LIBXSMM_X86_INSTR_VMOVAPS, LIBXSMM_X86_INSTR_VMOVNTPS, LIBXSMM_X86_INSTR_VMOVUPS );
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPWSSD;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
  } else if ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
    /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
    io_micro_kernel_config->vector_length = i_vlen_32b;
    io_micro_kernel_config->datatype_size_in = 4;
    io_micro_kernel_config->datatype_size_out = ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ? 1 : 4;
    io_micro_kernel_config->a_vmove_instruction = ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 )
      ? LIBXSMM_X86_INSTR_VMOVAPS : LIBXSMM_X86_INSTR_VMOVUPS;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    libxsmm_generator_gemm_mkc_set_c_moves( io_micro_kernel_config, i_xgemm_desc, i_use_masking_a_c,
      LIBXSMM_X86_INSTR_VMOVAPS, LIBXSMM_X86_INSTR_VMOVNTPS, LIBXSMM_X86_INSTR_VMOVUPS );
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPBUSD;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
  } else if ( (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) &&
              ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_VNNI_A) > 0) ) {
    /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
    io_micro_kernel_config->vector_length = i_vlen_32b;
    io_micro_kernel_config->datatype_size_in = 4;
    io_micro_kernel_config->datatype_size_out = ( LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ? 2 : 4;
    io_micro_kernel_config->a_vmove_instruction = ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 )
      ? LIBXSMM_X86_INSTR_VMOVAPS : LIBXSMM_X86_INSTR_VMOVUPS;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    libxsmm_generator_gemm_mkc_set_c_moves( io_micro_kernel_config, i_xgemm_desc, i_use_masking_a_c,
      LIBXSMM_X86_INSTR_VMOVAPS, LIBXSMM_X86_INSTR_VMOVNTPS, LIBXSMM_X86_INSTR_VMOVUPS );
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VDPBF16PS;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
  } else if ( (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) &&
              ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_VNNI_A) == 0) ) {
    /* flat (non-VNNI) BF16 input: 16bit loads, FP32 FMA compute */
    io_micro_kernel_config->vector_length = i_vlen_32b;
    io_micro_kernel_config->datatype_size_in = 2;
    io_micro_kernel_config->datatype_size_out = ( LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ? 2 : 4;
    io_micro_kernel_config->a_vmove_instruction = ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 )
      ? LIBXSMM_X86_INSTR_VMOVAPS : LIBXSMM_X86_INSTR_VMOVUPS;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTW;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    libxsmm_generator_gemm_mkc_set_c_moves( io_micro_kernel_config, i_xgemm_desc, i_use_masking_a_c,
      LIBXSMM_X86_INSTR_VMOVAPS, LIBXSMM_X86_INSTR_VMOVNTPS, LIBXSMM_X86_INSTR_VMOVUPS );
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
  } else {
    return 0; /* unsupported datatype combination */
  }
  return 1;
}

/* Initializes the micro-kernel configuration for full-vector-width GEMM code
 * generation on x86 (generic fallback, SSE, AVX/AVX2, 256-bit 32-register and
 * 512-bit EVEX paths).
 *
 * io_micro_kernel_config: configuration to initialize; fully overwritten.
 * i_arch:                 requested architecture id (LIBXSMM_X86_* range).
 * i_xgemm_desc:           GEMM descriptor (datatypes, alignment/VNNI flags).
 * i_use_masking_a_c:      non-zero when A/C accesses are masked; affects the
 *                         NT-store selection on the EVEX-capable paths. */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_fullvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
                                                                 const unsigned int i_arch,
                                                                 const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                                 const unsigned int i_use_masking_a_c ) {
  memset(io_micro_kernel_config, 0, sizeof(*io_micro_kernel_config)); /* avoid warning "maybe used uninitialized" */
  libxsmm_generator_gemm_setup_fusion_microkernel_properties_v2(i_xgemm_desc, io_micro_kernel_config);
  if ( (i_arch <= LIBXSMM_TARGET_ARCH_GENERIC) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
    /* architecture id out of the supported x86 range */
    libxsmm_generator_gemm_mkc_generic_fallback( io_micro_kernel_config );
  } else if ( i_arch <= LIBXSMM_X86_SSE42 ) {
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size_in = 8;
      io_micro_kernel_config->datatype_size_out = 8;
      io_micro_kernel_config->a_vmove_instruction = ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 )
        ? LIBXSMM_X86_INSTR_MOVAPD : LIBXSMM_X86_INSTR_MOVUPD;
      if ( i_arch == LIBXSMM_X86_GENERIC ) {
        /* baseline SSE: broadcast B via scalar load + shuffle */
        io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
        io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_SHUFPD;
      } else {
        /* newer SSE levels: MOVDDUP broadcasts in one instruction */
        io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVDDUP;
        io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
      /* SSE path wires no dedicated NT store: aligned move doubles as NT slot */
      libxsmm_generator_gemm_mkc_set_c_moves( io_micro_kernel_config, i_xgemm_desc, 0,
        LIBXSMM_X86_INSTR_MOVAPD, LIBXSMM_X86_INSTR_MOVAPD, LIBXSMM_X86_INSTR_MOVUPD );
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPD;
    } else {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size_in = 4;
      io_micro_kernel_config->datatype_size_out = 4;
      io_micro_kernel_config->a_vmove_instruction = ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 )
        ? LIBXSMM_X86_INSTR_MOVAPS : LIBXSMM_X86_INSTR_MOVUPS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_SHUFPS;
      /* SSE path wires no dedicated NT store: aligned move doubles as NT slot */
      libxsmm_generator_gemm_mkc_set_c_moves( io_micro_kernel_config, i_xgemm_desc, 0,
        LIBXSMM_X86_INSTR_MOVAPS, LIBXSMM_X86_INSTR_MOVAPS, LIBXSMM_X86_INSTR_MOVUPS );
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPS;
    }
  } else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'y';
    if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size_in = 8;
      io_micro_kernel_config->datatype_size_out = 8;
      io_micro_kernel_config->a_vmove_instruction = ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 )
        ? LIBXSMM_X86_INSTR_VMOVAPD : LIBXSMM_X86_INSTR_VMOVUPD;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      /* masking arg fixed to 0: this path applies no mask to C stores */
      libxsmm_generator_gemm_mkc_set_c_moves( io_micro_kernel_config, i_xgemm_desc, 0,
        LIBXSMM_X86_INSTR_VMOVAPD, LIBXSMM_X86_INSTR_VMOVNTPD, LIBXSMM_X86_INSTR_VMOVUPD );
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        /* AVX1 has no FMA: separate multiply and add */
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      }
    } else {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size_in = 4;
      io_micro_kernel_config->datatype_size_out = 4;
      io_micro_kernel_config->a_vmove_instruction = ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 )
        ? LIBXSMM_X86_INSTR_VMOVAPS : LIBXSMM_X86_INSTR_VMOVUPS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      /* masking arg fixed to 0: this path applies no mask to C stores */
      libxsmm_generator_gemm_mkc_set_c_moves( io_micro_kernel_config, i_xgemm_desc, 0,
        LIBXSMM_X86_INSTR_VMOVAPS, LIBXSMM_X86_INSTR_VMOVNTPS, LIBXSMM_X86_INSTR_VMOVUPS );
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        /* AVX1 has no FMA: separate multiply and add */
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      }
    }
  } else if ( i_arch < LIBXSMM_X86_AVX512 ) {
    /* 32-register 256-bit code path */
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 32;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'y';
    if ( 0 == libxsmm_generator_gemm_mkc_set_dtype_avx512( io_micro_kernel_config, i_xgemm_desc, i_use_masking_a_c, 8 ) ) {
      /* shouldn't happen as we caught this case earlier */
      libxsmm_generator_gemm_mkc_generic_fallback( io_micro_kernel_config );
    }
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
    /* 512-bit code path */
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 32;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'z';
    if ( 0 == libxsmm_generator_gemm_mkc_set_dtype_avx512( io_micro_kernel_config, i_xgemm_desc, i_use_masking_a_c, 16 ) ) {
      /* shouldn't happen as we caught this case earlier */
      libxsmm_generator_gemm_mkc_generic_fallback( io_micro_kernel_config );
    }
  } else {
    /* should not happen: all architecture values are covered above */
  }
  /* architecture-independent scalar/ALU defaults */
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
/* Initializes the micro-kernel configuration for half-vector-width GEMM code
 * generation: the AVX/AVX2 path uses 128-bit xmm registers; SSE requests are
 * redirected to the scalar config and AVX512 requests to the fullvector config.
 *
 * io_micro_kernel_config: configuration to initialize; fully overwritten.
 * i_arch:                 requested architecture id (LIBXSMM_X86_* range).
 * i_xgemm_desc:           GEMM descriptor (datatypes, alignment flags).
 * i_use_masking_a_c:      masking request, stored into the config. */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_halfvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
                                                                 const unsigned int i_arch,
                                                                 const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                                 const unsigned int i_use_masking_a_c ) {
  /* zero the config first, consistent with the fullvector variant; the original
   * code skipped this and could leave fields such as c_vmove_nts_instruction
   * uninitialized on the non-redirecting paths */
  memset(io_micro_kernel_config, 0, sizeof(*io_micro_kernel_config));
  libxsmm_generator_gemm_setup_fusion_microkernel_properties_v2(i_xgemm_desc, io_micro_kernel_config);
  if ( (i_arch <= LIBXSMM_TARGET_ARCH_GENERIC) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
    /* architecture id out of the supported x86 range */
    io_micro_kernel_config->instruction_set = LIBXSMM_TARGET_ARCH_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size_in = 0;
    io_micro_kernel_config->datatype_size_out = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    /* explicitly reset the NT move as well, matching the scalar variant */
    io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  } else if ( i_arch <= LIBXSMM_X86_SSE42 ) {
#if !defined(NDEBUG)
    fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, redirecting to scalar, please fix the generation code!!!\n");
#endif
    libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
  } else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
    /* half vector: run the AVX encoding on 128-bit xmm registers */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size_in = 8;
      io_micro_kernel_config->datatype_size_out = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVDDUP;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        /* AVX1 has no FMA: separate multiply and add */
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      } else {
        /* FMA fuses the add; no separate add instruction is used on this path */
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    } else {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size_in = 4;
      io_micro_kernel_config->datatype_size_out = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        /* AVX1 has no FMA: separate multiply and add */
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      } else {
        /* FMA fuses the add; no separate add instruction is used on this path */
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    }
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
#if !defined(NDEBUG)
    fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, AVX512 redirecting to fullvector!\n");
#endif
    libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
  } else {
    /* should not happen */
  }
  /* architecture-independent scalar/ALU defaults */
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
LIBXSMM_API_INTERN
/**
 * Initializes the micro-kernel configuration for SCALAR (one-element-per-op)
 * GEMM code generation: selects move/multiply/add/zeroing instruction
 * mnemonics for A, B and C, plus datatype sizes, register count and vector
 * name, all keyed on the target architecture and the descriptor's input
 * datatype. Also fills in the common ALU/prefetch instruction choices used by
 * the loop emitters.
 *
 * @param io_micro_kernel_config  config struct populated by this call
 * @param i_arch                  target architecture id (LIBXSMM_X86_* range)
 * @param i_xgemm_desc            GEMM descriptor (datatype is read here)
 * @param i_use_masking_a_c       masking request, stored verbatim for non-generic targets
 */
void libxsmm_generator_gemm_init_micro_kernel_config_scalar( libxsmm_micro_kernel_config* io_micro_kernel_config,
const unsigned int i_arch,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_use_masking_a_c ) {
/* derive fused-eltwise (relu/bias/vnni-output/...) properties from the descriptor first */
libxsmm_generator_gemm_setup_fusion_microkernel_properties_v2(i_xgemm_desc, io_micro_kernel_config);
/* out-of-range target: neutral configuration with zero sizes and undefined instructions */
if ( ( i_arch <= LIBXSMM_TARGET_ARCH_GENERIC ) || ( i_arch > LIBXSMM_X86_ALLFEAT ) ) {
io_micro_kernel_config->instruction_set = LIBXSMM_TARGET_ARCH_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
/* 'a' appears to be a placeholder vector name for the generic target -- TODO confirm */
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size_in = 0;
io_micro_kernel_config->datatype_size_out = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
/* SSE-class targets (up to SSE4.2): legacy scalar SSE instructions, separate mul + add */
} else if ( i_arch <= LIBXSMM_X86_SSE42 ) {
io_micro_kernel_config->instruction_set = i_arch;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* FP64 scalar: one element per "vector", 8-byte elements */
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size_in = 8;
io_micro_kernel_config->datatype_size_out = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
/* no scalar non-temporal store exists; plain MOVSD is reused for the NTS slot */
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSD;
} else {
/* FP32 scalar (non-F64 input falls through here): 4-byte elements */
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size_in = 4;
io_micro_kernel_config->datatype_size_out = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSS;
}
/* AVX and newer: VEX-encoded scalar instructions; FMA replaces mul+add past plain AVX */
} else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
io_micro_kernel_config->instruction_set = i_arch;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size_in = 8;
io_micro_kernel_config->datatype_size_out = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
if ( i_arch == LIBXSMM_X86_AVX ) {
/* plain AVX has no FMA: keep separate multiply and add */
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSD;
} else {
/* AVX2+: fused multiply-add; the add slot is intentionally left undefined */
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
} else {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size_in = 4;
io_micro_kernel_config->datatype_size_out = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
if ( i_arch == LIBXSMM_X86_AVX ) {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSS;
} else {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
}
} else {
/* should not happen */
}
/* architecture-independent ALU and prefetch instruction selection */
io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
/* JL pairs with CMPQ in the loop footers: jump back while counter < bound */
io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
LIBXSMM_API_INTERN
/**
 * Appends a debug-only FLOP accounting snippet to C-source output
 * (code_type == 0). The emitted text increments the global
 * libxsmm_num_total_flops by 2*M*N*K, guarded by NDEBUG and made
 * atomic under OpenMP. Binary code generation is left untouched.
 *
 * @param io_generated_code  generated code object; only its C-source
 *                           string representation is appended to
 * @param i_xgemm_desc       GEMM descriptor supplying m, n and k
 */
void libxsmm_generator_gemm_add_flop_counter( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc ) {
  /* flop counting only makes sense for C-source (inline-assembly) output */
  if ( io_generated_code->code_type == 0 ) {
    /* fixed preamble lines preceding the counter statement */
    static const char* const l_preamble[] = { "#ifndef NDEBUG\n", "#ifdef _OPENMP\n", "#pragma omp atomic\n", "#endif\n" };
    char l_line[512];
    const unsigned int l_max_len = sizeof(l_line) - 1;
    int l_len = 0;
    unsigned int l_i;
    for ( l_i = 0; l_i < sizeof(l_preamble)/sizeof(l_preamble[0]); ++l_i ) {
      l_len = LIBXSMM_SNPRINTF( l_line, l_max_len, "%s", l_preamble[l_i] );
      libxsmm_append_code_as_string( io_generated_code, l_line, l_len );
    }
    /* the actual counter update: 2 flops (mul+add) per M*N*K element */
    l_len = LIBXSMM_SNPRINTF( l_line, l_max_len, "libxsmm_num_total_flops += %u;\n", 2u * i_xgemm_desc->m * i_xgemm_desc->n * i_xgemm_desc->k);
    libxsmm_append_code_as_string( io_generated_code, l_line, l_len );
    /* close the NDEBUG guard */
    l_len = LIBXSMM_SNPRINTF( l_line, l_max_len, "#endif\n" );
    libxsmm_append_code_as_string( io_generated_code, l_line, l_len );
  }
}
LIBXSMM_API_INTERN
/**
 * Emits the head of the K loop: zeroes the k-loop GP register, plants the
 * jump-back label for the matching footer, and advances the counter by the
 * k blocking factor up front (the footer compares it against the bound).
 *
 * @param io_generated_code      code stream being emitted into
 * @param io_loop_label_tracker  records the label the footer jumps back to
 * @param i_gp_reg_mapping       GP register assignment (k-loop counter)
 * @param i_micro_kernel_config  supplies mov/add instruction mnemonics
 * @param i_m_blocking           unused in this emitter
 * @param i_k_blocking           k-loop increment per iteration
 */
void libxsmm_generator_gemm_header_kloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_m_blocking,
const unsigned int i_k_blocking ) {
LIBXSMM_UNUSED(i_m_blocking);
/* kloop = 0 */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_kloop, 0);
/* loop-head label */
libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
/* kloop += k_blocking (incremented at the top; footer tests kloop < bound) */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_kloop, i_k_blocking);
}
LIBXSMM_API_INTERN
/**
 * Emits the tail of the K loop: compares the k-loop counter against the
 * blocked-k bound and jumps back to the loop head while it is below.
 * Once the K loop is fully done (i_kloop_complete != 0), the B pointer is
 * rewound by the distance the loop body advanced it over the K extent;
 * the per-step stride differs when B is stored transposed.
 *
 * @param io_generated_code      code stream being emitted into
 * @param io_loop_label_tracker  tracker holding the loop-head label
 * @param i_gp_reg_mapping       GP register assignment (k counter, B pointer)
 * @param i_micro_kernel_config  cmp/jmp/sub instruction mnemonics and datatype size
 * @param i_xgemm_desc           GEMM descriptor (k, ldb, trans-B flag)
 * @param i_m_blocking           unused in this emitter
 * @param i_max_blocked_k        loop bound the counter is compared against
 * @param i_kloop_complete       nonzero => also rewind the B pointer
 */
void libxsmm_generator_gemm_footer_kloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_max_blocked_k,
const unsigned int i_kloop_complete ) {
  LIBXSMM_UNUSED(i_m_blocking);
  /* kloop vs. bound, then conditional jump back to the loop head */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_kloop, i_max_blocked_k );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
  if ( i_kloop_complete != 0 ) {
    /* bytes to rewind B: ldb elements per k step for transposed B, one element otherwise */
    const int l_b_rewind = ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 )
                             ? i_xgemm_desc->ldb * i_xgemm_desc->k * i_micro_kernel_config->datatype_size_in
                             : i_xgemm_desc->k * i_micro_kernel_config->datatype_size_in;
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
                                     i_gp_reg_mapping->gp_reg_b, l_b_rewind );
  }
}
LIBXSMM_API_INTERN
/**
 * Emits the head of the batch-reduce loop: zeroes the reduce-loop GP
 * register and plants the jump-back label consumed by
 * libxsmm_generator_gemm_footer_reduceloop.
 *
 * @param io_generated_code      code stream being emitted into
 * @param io_loop_label_tracker  records the label the footer jumps back to
 * @param i_gp_reg_mapping       GP register assignment (reduce-loop counter)
 * @param i_micro_kernel_config  supplies the mov instruction mnemonic
 */
void libxsmm_generator_gemm_header_reduceloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config ) {
/* reduce_loop = 0 */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 0);
libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
}
LIBXSMM_API_INTERN
/**
 * Emits the tail of the batch-reduce loop: increments the reduce-loop
 * counter by one, compares it (register-register) against the reduce count,
 * and jumps back to the loop head while it is below.
 *
 * @param io_generated_code      code stream being emitted into
 * @param io_loop_label_tracker  tracker holding the loop-head label
 * @param i_gp_reg_mapping       GP registers (reduce counter and reduce count)
 * @param i_micro_kernel_config  add/cmp/jmp instruction mnemonics
 * @param i_xgemm_desc           unused in this emitter
 */
void libxsmm_generator_gemm_footer_reduceloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc) {
LIBXSMM_UNUSED(i_xgemm_desc);
/* ++reduce_loop */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 1);
/* compare against the runtime reduce count held in a register */
libxsmm_x86_instruction_alu_reg( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_reduce_count, i_gp_reg_mapping->gp_reg_reduce_loop);
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
LIBXSMM_API_INTERN
/**
 * Emits the head of the N loop: initializes the n-loop GP register to
 * i_n_init, plants the jump-back label, and advances the counter by the
 * n blocking factor up front (the footer compares it against i_n_done).
 *
 * @param io_generated_code      code stream being emitted into
 * @param io_loop_label_tracker  records the label the footer jumps back to
 * @param i_gp_reg_mapping       GP register assignment (n-loop counter)
 * @param i_micro_kernel_config  mov/add instruction mnemonics
 * @param i_n_init               starting value of the n counter
 * @param i_n_blocking           n-loop increment per iteration
 */
void libxsmm_generator_gemm_header_nloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_n_init,
const unsigned int i_n_blocking) {
/* nloop = n_init */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_init );
libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
/* nloop += n_blocking (incremented at the top; footer tests nloop < n_done) */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_blocking );
}
LIBXSMM_API_INTERN
/**
 * Emits the tail of the N loop. In order it:
 *   1. advances the C pointer past the just-finished n block and rewinds it
 *      over the m extent (element size depends on the output datatype:
 *      2 bytes for BF16, 1 byte for I8 per the commented-out size factors,
 *      datatype_size_out otherwise; VNNI-format C doubles the m rewind);
 *   2. applies the matching adjustment to any fused-eltwise stack pointers
 *      (relu bitmask, external VNNI output buffer, relu-bwd bitmask,
 *      separate output buffer, bf16/f32 column bias), using gp_reg_help_0
 *      saved/restored around the whole group;
 *   3. adjusts the B-prefetch pointer for BL2-via-C prefetch strategies;
 *   4. advances B and rewinds A for the next n block -- for batch-reduce
 *      address mode this is done for every entry of the A/B pointer arrays
 *      via an emitted reduce loop, otherwise directly on the registers;
 *   5. compares the n counter against i_n_done and jumps back to the head.
 *
 * @param io_generated_code      code stream being emitted into
 * @param io_loop_label_tracker  tracker holding the n-loop head label
 * @param i_gp_reg_mapping       GP register assignment (a/b/c, helpers, loop regs)
 * @param i_micro_kernel_config  instruction mnemonics, datatype sizes, fusion flags
 * @param i_xgemm_desc           GEMM descriptor (m, ldc, ldb, flags, prefetch)
 * @param i_n_blocking           columns processed per n iteration
 * @param i_n_done               loop bound compared against the n counter
 */
void libxsmm_generator_gemm_footer_nloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_n_blocking,
const unsigned int i_n_done ) {
/* --- 1. advance C: + n_blocking columns, - m elements (datatype-dependent byte sizes) --- */
if ( LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
if (i_micro_kernel_config->vnni_format_C == 0) {
/* flat BF16 C: 2 bytes per element */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)*2 /*(i_micro_kernel_config->datatype_size/2)*/) - ((i_xgemm_desc->m) * 2 /*(i_micro_kernel_config->datatype_size/2)*/) );
} else {
/* VNNI-format BF16 C: m rewind is doubled (rows come in pairs) */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)*2 /*(i_micro_kernel_config->datatype_size/2)*/) - ((i_xgemm_desc->m) * 2 * 2 /*(i_micro_kernel_config->datatype_size/2)*/) );
}
} else if ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
/* I8 C: 1 byte per element, so no size multiplier */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)/**(i_micro_kernel_config->datatype_size/4)*/) - ((i_xgemm_desc->m) /** (i_micro_kernel_config->datatype_size/4)*/) );
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size_out)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size_out)) );
}
/* Also adjust eltwise pointers */
/* gp_reg_help_0 is clobbered by the stack-var get/set helpers below; preserve it once for the whole group */
if ((i_micro_kernel_config->fused_relu == 1) || (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) || (i_micro_kernel_config->fused_relu_bwd == 1) || (i_micro_kernel_config->fused_bcolbias == 1) || (i_micro_kernel_config->fused_scolbias == 1) || (i_micro_kernel_config->overwrite_C == 0)) {
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
}
/* relu bitmask pointer: 1 bit per element, hence the /8 byte distances */
if ((i_micro_kernel_config->fused_relu == 1) && (i_micro_kernel_config->overwrite_C == 1) ) {
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, (i_n_blocking*i_xgemm_desc->ldc)/8 - ((i_xgemm_desc->m/8) ) );
libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
}
/* external VNNI output buffer: 2-byte elements, doubled m rewind like VNNI C above */
if (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) {
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, (i_n_blocking*(i_xgemm_desc->ldc)*2/*(i_micro_kernel_config->datatype_size/2)*/) - ((i_xgemm_desc->m) * 2 * 2 /*(i_micro_kernel_config->datatype_size/2)*/) );
libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
}
/* relu-bwd bitmask pointer: 1 bit per element */
if (i_micro_kernel_config->fused_relu_bwd == 1) {
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, (i_n_blocking*i_xgemm_desc->ldc)/8 - ((i_xgemm_desc->m/8) ) );
libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR, i_gp_reg_mapping->gp_reg_help_0 );
}
/* In this case also advance the output ptr */
/* separate output buffer (C not overwritten): 2-byte elements -- presumably BF16 output; TODO confirm */
if (i_micro_kernel_config->overwrite_C == 0) {
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, (i_n_blocking*(i_xgemm_desc->ldc)*2/*(i_micro_kernel_config->datatype_size/2)*/) - ((i_xgemm_desc->m) * 2 /*(i_micro_kernel_config->datatype_size/2)*/) );
libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
}
/* column-bias pointers only move along m, so rewind by the m extent (bf16: 2 bytes) */
if (i_micro_kernel_config->fused_bcolbias == 1) {
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0, ( i_xgemm_desc->m * 2/*(i_micro_kernel_config->datatype_size/2)*/) );
libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
}
/* f32 column bias: 4 bytes per element */
if (i_micro_kernel_config->fused_scolbias == 1) {
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0, ( i_xgemm_desc->m * 4/*i_micro_kernel_config->datatype_size*/) );
libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
}
if ((i_micro_kernel_config->fused_relu == 1) || (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) || (i_micro_kernel_config->fused_relu_bwd == 1) || (i_micro_kernel_config->fused_bcolbias == 1) || (i_micro_kernel_config->fused_scolbias == 1) || (i_micro_kernel_config->overwrite_C == 0)) {
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
}
/* B prefetch */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_b_prefetch,
(i_n_blocking*(i_xgemm_desc->ldc)*i_micro_kernel_config->datatype_size_in) - ((i_xgemm_desc->m)*i_micro_kernel_config->datatype_size_in) );
}
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c_prefetch,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size_out)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size_out)) );
}
#endif
/* --- 4. A/B pointer bookkeeping for the next n block --- */
if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
/* batch-reduce address mode: gp_reg_a / gp_reg_b point to arrays of 8-byte pointers;
 * emit a reduce loop that updates every entry in place */
/* handle trans B */
int l_b_offset = 0;
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size_in;
} else {
l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size_in;
}
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
/* load a[reduce_loop] */
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
/* rewind this A pointer over the m extent */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size_in)) );
/* store back a[reduce_loop] */
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
/* load b[reduce_loop] */
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_b,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
/* advance this B pointer past the n block */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_help_0, l_b_offset );
/* store back b[reduce_loop] */
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_b,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
/* mirror the A rewind on the A-prefetch pointer array if AL2 prefetch is active */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size_in)) );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
}
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
} else {
/* plain mode: adjust the A and B base registers directly */
/* handle trans B */
int l_b_offset = 0;
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size_in;
} else {
l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size_in;
}
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_b, l_b_offset );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_a, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size_in)) );
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size_in)) );
}
}
/* --- 5. nloop vs. n_done, jump back while below --- */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_done );
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
LIBXSMM_API_INTERN
/**
 * Emits the head of the M loop: initializes the m-loop GP register to
 * i_m_init, plants the jump-back label, and advances the counter by the
 * m blocking factor up front (the footer compares it against i_m_done).
 *
 * @param io_generated_code      code stream being emitted into
 * @param io_loop_label_tracker  records the label the footer jumps back to
 * @param i_gp_reg_mapping       GP register assignment (m-loop counter)
 * @param i_micro_kernel_config  mov/add instruction mnemonics
 * @param i_m_init               starting value of the m counter
 * @param i_m_blocking           m-loop increment per iteration
 */
void libxsmm_generator_gemm_header_mloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_m_init,
const unsigned int i_m_blocking ) {
/* mloop = m_init */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_init );
libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
/* mloop += m_blocking (incremented at the top; footer tests mloop < m_done) */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_blocking );
}
LIBXSMM_API_INTERN
/**
 * Emits the tail of the M loop. In order it:
 *   1. advances the C pointer past the just-finished m block;
 *   2. applies the matching advance to any fused-eltwise stack pointers
 *      (relu bitmask, external VNNI output buffer, relu-bwd bitmask,
 *      separate output buffer, bf16/f32 column bias), with gp_reg_help_0
 *      saved/restored around the whole group;
 *   3. advances the B-prefetch pointer for BL2-via-C prefetch strategies;
 *   4. rewinds the A (and A-prefetch, when AL2 prefetch is active) pointer
 *      over the K extent minus the m-block advance -- for batch-reduce
 *      address mode every entry of the pointer array is updated via an
 *      emitted reduce loop, otherwise the register is adjusted directly;
 *   5. compares the m counter against i_m_done and jumps back to the head.
 *
 * @param io_generated_code      code stream being emitted into
 * @param io_loop_label_tracker  tracker holding the m-loop head label
 * @param i_gp_reg_mapping       GP register assignment (a/c, helpers, loop regs)
 * @param i_micro_kernel_config  instruction mnemonics, datatype sizes, fusion flags
 * @param i_xgemm_desc           GEMM descriptor (k, lda, flags, prefetch)
 * @param i_m_blocking           rows processed per m iteration
 * @param i_m_done               loop bound compared against the m counter
 */
void libxsmm_generator_gemm_footer_mloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_m_done ) {
/* advance C pointer */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size_out) );
/* Also adjust eltwise pointers */
/* gp_reg_help_0 is clobbered by the stack-var get/set helpers below; preserve it once for the whole group */
if ((i_micro_kernel_config->fused_relu == 1) || (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) || (i_micro_kernel_config->fused_relu_bwd == 1) || (i_micro_kernel_config->fused_bcolbias == 1) || (i_micro_kernel_config->fused_scolbias == 1) || (i_micro_kernel_config->overwrite_C == 0)) {
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
}
/* relu bitmask pointer: 1 bit per element, hence the /8 byte advance */
if ((i_micro_kernel_config->fused_relu == 1) && (i_micro_kernel_config->overwrite_C == 1) ) {
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, i_m_blocking/8 );
libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
}
/* external VNNI output buffer: 2-byte elements in row pairs (hence *2*2) */
if (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) {
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, i_m_blocking*2*2/*(i_micro_kernel_config->datatype_size/2)*/ );
libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
}
/* relu-bwd bitmask pointer: 1 bit per element */
if (i_micro_kernel_config->fused_relu_bwd == 1) {
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, i_m_blocking/8 );
libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR, i_gp_reg_mapping->gp_reg_help_0 );
}
/* separate output buffer (C not overwritten): 2-byte elements -- presumably BF16 output; TODO confirm */
if (i_micro_kernel_config->overwrite_C == 0) {
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, i_m_blocking*2/*(i_micro_kernel_config->datatype_size/2)*/ );
libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
}
/* bf16 column bias: 2 bytes per element */
if (i_micro_kernel_config->fused_bcolbias == 1) {
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, i_m_blocking * 2/*(i_micro_kernel_config->datatype_size/2)*/ );
libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
}
/* f32 column bias: 4 bytes per element */
if (i_micro_kernel_config->fused_scolbias == 1) {
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, i_m_blocking * 4 /*i_micro_kernel_config->datatype_size*/ );
libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
}
if ((i_micro_kernel_config->fused_relu == 1) || (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) || (i_micro_kernel_config->fused_relu_bwd == 1) || (i_micro_kernel_config->fused_bcolbias == 1) || (i_micro_kernel_config->fused_scolbias == 1) || (i_micro_kernel_config->overwrite_C == 0)) {
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
}
/* C prefetch */
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size_out) );
}
#endif
/* B prefetch */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_b_prefetch, i_m_blocking*i_micro_kernel_config->datatype_size_in );
}
}
/* A prefetch */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
/* batch-reduce address mode: gp_reg_a_prefetch points to an array of 8-byte
 * pointers; update each entry via an emitted reduce loop */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ) {
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
/* load a_prefetch[reduce_loop] */
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
/* rewind over the K extent, keeping the m-block advance */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size_in) * (i_xgemm_desc->lda) ) -
(i_m_blocking * (i_micro_kernel_config->datatype_size_in)) );
/* store back a_prefetch[reduce_loop] */
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
}
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a_prefetch,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size_in) * (i_xgemm_desc->lda) ) -
(i_m_blocking * (i_micro_kernel_config->datatype_size_in)) );
}
}
/* advance A pointer */
if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
/* batch-reduce address mode: rewind every A pointer in the array */
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
/* load a[reduce_loop] */
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
/* rewind over the K extent, keeping the m-block advance */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size_in) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size_in)) );
/* store back a[reduce_loop] */
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size_in) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size_in)) );
}
/* loop handling */
/* mloop vs. m_done, jump back while below */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_done );
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
/**
 * Emits the instruction stream that initializes the C accumulator register
 * block before the GEMM micro kernel runs.
 *
 * Behavior is selected by the beta flag of the descriptor:
 *  - beta = 1 (LIBXSMM_GEMM_FLAG_BETA_0 not set): C is loaded from memory.
 *      * pure BF16 kernels: 16-bit values are loaded (masked load for the
 *        last m-block when masking is enabled), sign-extended to 32 bit via
 *        VPMOVSXWD and shifted left by 16 to form valid FP32 numbers;
 *      * pure int8 kernels: 8-bit values are zero- or sign-extended to
 *        32 bit (VPMOVZXBD/VPMOVSXBD depending on LIBXSMM_GEMM_FLAG_C_UNSIGNED);
 *      * otherwise: a plain (optionally masked) vector load per register.
 *      A fused column bias, if configured, is added on top afterwards.
 *  - beta = 0: either a column bias is loaded directly into the accumulators
 *    (FP32 or BF16 flavor), or the accumulators are zeroed with xor.
 *
 * @param io_generated_code     generated-code buffer that receives the emitted instructions.
 * @param i_gp_reg_mapping      GP register mapping (provides gp_reg_c, the C pointer).
 * @param i_micro_kernel_config micro kernel configuration: ISA level, vector length,
 *                              move/xor instructions, masking and fusion flags.
 * @param i_xgemm_desc          GEMM descriptor: flags, datatypes, ldc.
 * @param i_m_blocking          blocking in the M dimension, in elements.
 * @param i_n_blocking          blocking in the N dimension, in columns.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_load_C( libxsmm_generated_code* io_generated_code,
    const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config,
    const libxsmm_gemm_descriptor* i_xgemm_desc,
    const unsigned int i_m_blocking,
    const unsigned int i_n_blocking ) {
  unsigned int l_m_blocking, l_vec_reg_acc_start;
  /* register blocking counter in n */
  unsigned int l_n = 0;
  /* register blocking counter in m */
  unsigned int l_m = 0;
  assert(0 < i_micro_kernel_config->vector_length);

  /* deriving register blocking from kernel config: number of vector registers
   * needed to cover i_m_blocking elements (rounded up for a partial vector) */
  l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ? i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1;
  /* start register of accumulator: the accumulators occupy the topmost
   * (i_n_blocking * l_m_blocking) registers of the vector register file */
  l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);

#if !defined(NDEBUG)
  /* Do some test if it is possible to generate the requested code.
     This is not done in release mode and therefore bad
     things might happen.... HUAAH */
  /* per-ISA limits on the register block shape; exceeding them would
   * run out of architectural vector registers */
  if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_GENERIC ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE42 ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
    if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256 ||i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CLX
      ||i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX) {
    if ( (i_n_blocking > 28) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 8) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_CORE ) {
    if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking != 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) {
    if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else {}
#if 0
  if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
    return;
  }
#endif
#endif /*!defined(NDEBUG)*/

  /* load C accumulator */
  if (0 == (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=1 */
    /* pure BF16 kernel */
    if ( ( ((i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) && (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT)) ||
           ((i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ))
         ) && ( (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
      /* we add when scaling during conversion to FP32 */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* load 16 bit values into xmm portion of the register */
          if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
            /* masked 16-bit load for the last (partial) m-block */
            libxsmm_x86_instruction_vec_move( io_generated_code,
                i_micro_kernel_config->instruction_set,
                LIBXSMM_X86_INSTR_VMOVDQU16,
                i_gp_reg_mapping->gp_reg_c,
                LIBXSMM_X86_GP_REG_UNDEF, 0,
                ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
                ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'y' : 'z',
                0, 2, 1, 0 );
          } else {
            /* unmasked load into the lower half of the register (the values
             * are half-width before the up-conversion below) */
            libxsmm_x86_instruction_vec_move( io_generated_code,
                i_micro_kernel_config->instruction_set,
                i_micro_kernel_config->c_vmove_instruction,
                i_gp_reg_mapping->gp_reg_c,
                LIBXSMM_X86_GP_REG_UNDEF, 0,
                ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
                ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'x' : 'y',
                0, 0, 1, 0 );
          }
          /* convert 16 bit values into 32 bit (integer convert) */
          libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
              LIBXSMM_X86_INSTR_VPMOVSXWD,
              i_micro_kernel_config->vector_name,
              0, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
          /* shift 16 bits to the left to generate valid FP32 numbers */
          libxsmm_x86_instruction_vec_compute_2reg_imm8(io_generated_code,
              LIBXSMM_X86_INSTR_VPSLLD_I,
              i_micro_kernel_config->vector_name,
              l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
              l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
              16);
        }
      }
      /* Check if we have to add bias */
      if (i_micro_kernel_config->fused_bcolbias == 1) {
        libxsmm_generator_gemm_add_colbias_to_2D_block( io_generated_code, i_gp_reg_mapping, i_micro_kernel_config,
            LIBXSMM_DATATYPE_BF16, l_vec_reg_acc_start, l_m_blocking, i_n_blocking );
      }
      /* pure int8 kernel */
    } else if ( ( ((i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) && (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT)) ||
                  ((i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) )
                ) &&
                ( (LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
      /* we need to up convert int8 to int32 */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* load 16 bit values into xmm portion of the register*/
          if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
            /* masked 8-bit load for the last (partial) m-block */
            libxsmm_x86_instruction_vec_move( io_generated_code,
                i_micro_kernel_config->instruction_set,
                LIBXSMM_X86_INSTR_VMOVDQU8,
                i_gp_reg_mapping->gp_reg_c,
                LIBXSMM_X86_GP_REG_UNDEF, 0,
                ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
                ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'y' : 'z',
                0, 2, 1, 0 );
          } else {
            libxsmm_x86_instruction_vec_move( io_generated_code,
                i_micro_kernel_config->instruction_set,
                i_micro_kernel_config->c_vmove_instruction,
                i_gp_reg_mapping->gp_reg_c,
                LIBXSMM_X86_GP_REG_UNDEF, 0,
                ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
                'x',
                0, 0, 1, 0 );
          }
          /* convert 8 bit values into 32 bit (integer convert);
           * pick zero- vs sign-extension based on the C-unsigned flag */
          if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED) != 0 ) {
            libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
                LIBXSMM_X86_INSTR_VPMOVZXBD,
                i_micro_kernel_config->vector_name,
                0, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
          } else {
            libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
                LIBXSMM_X86_INSTR_VPMOVSXBD,
                i_micro_kernel_config->vector_name,
                0, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
          }
        }
      }
    } else {
      /* adding to C, so let's load C */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* we only mask the last m-blocked load */
          libxsmm_x86_instruction_vec_move( io_generated_code,
              i_micro_kernel_config->instruction_set,
              i_micro_kernel_config->c_vmove_instruction,
              i_gp_reg_mapping->gp_reg_c,
              LIBXSMM_X86_GP_REG_UNDEF, 0,
              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
              i_micro_kernel_config->vector_name,
              l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 1, 0 );
        }
#if 0
        /* disabled C-prefetch variant; note the loop increment is broken
         * (l_m += l_m++ is UB) and must be fixed before re-enabling */
        if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
             i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
          for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) {
            libxsmm_x86_instruction_prefetch( io_generated_code,
                i_micro_kernel_config->prefetch_instruction,
                i_gp_reg_mapping->gp_reg_c_prefetch,
                LIBXSMM_X86_GP_REG_UNDEF, 0,
                ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out));
          }
        }
#endif
      }
      /* Check if we have to add bias */
      if (i_micro_kernel_config->fused_scolbias == 1) {
        libxsmm_generator_gemm_add_colbias_to_2D_block( io_generated_code, i_gp_reg_mapping, i_micro_kernel_config,
            LIBXSMM_DATATYPE_F32, l_vec_reg_acc_start, l_m_blocking, i_n_blocking );
      }
    }
  } else {
    /* Beta=0: no load of C required */
    if (i_micro_kernel_config->fused_scolbias == 1) {
      /* initialize accumulators directly with the FP32 column bias */
      libxsmm_generator_gemm_load_colbias_to_2D_block( io_generated_code, i_gp_reg_mapping, i_micro_kernel_config,
          LIBXSMM_DATATYPE_F32, l_vec_reg_acc_start, l_m_blocking, i_n_blocking );
    } else if (i_micro_kernel_config->fused_bcolbias == 1) {
      /* initialize accumulators directly with the BF16 column bias */
      libxsmm_generator_gemm_load_colbias_to_2D_block( io_generated_code, i_gp_reg_mapping, i_micro_kernel_config,
          LIBXSMM_DATATYPE_BF16, l_vec_reg_acc_start, l_m_blocking, i_n_blocking );
    } else {
      /* overwriting C, so let's xout accumulator */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* @TODO: cannot migrate to new encoder as this is also SSE */
          if ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) && LIBXSMM_DATATYPE_I32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype )){
            /* i8->i32 kernels load C even for beta=0
             * (NOTE(review): presumably the int8 pipeline needs the values;
             * confirm against the compute kernel) */
            libxsmm_x86_instruction_vec_move( io_generated_code,
                i_micro_kernel_config->instruction_set,
                i_micro_kernel_config->c_vmove_instruction,
                i_gp_reg_mapping->gp_reg_c,
                LIBXSMM_X86_GP_REG_UNDEF, 0,
                ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
                i_micro_kernel_config->vector_name,
                l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), 0, 1, 0 );
          } else {
            /* zero the accumulator: AVX and newer use the 3-operand xor form,
             * pre-AVX (SSE) uses the 2-operand form */
            if ( io_generated_code->arch >= LIBXSMM_X86_AVX ) {
              libxsmm_x86_instruction_vec_compute_3reg( io_generated_code,
                  i_micro_kernel_config->vxor_instruction,
                  i_micro_kernel_config->vector_name,
                  l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                  l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                  l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
            } else {
              libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
                  i_micro_kernel_config->vxor_instruction,
                  i_micro_kernel_config->vector_name,
                  l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                  l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
            }
          }
        }
#if 0
        /* disabled C-prefetch variant; note the loop increment is broken
         * (l_m += l_m++ is UB) and must be fixed before re-enabling */
        if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
             i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
          for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) {
            libxsmm_x86_instruction_prefetch( io_generated_code,
                i_micro_kernel_config->prefetch_instruction,
                i_gp_reg_mapping->gp_reg_c_prefetch,
                LIBXSMM_X86_GP_REG_UNDEF, 0,
                ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out));
          }
        }
#endif
      }
    }
  }
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_store_C( libxsmm_generated_code* io_generated_code,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_n_blocking )
{
/* deriving register blocking from kernel config */
unsigned int l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ? i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1;
/* register blocking counter in n */
unsigned int l_n = 0;
/* register blocking counter in m */
unsigned int l_m = 0;
/* start register of accumulator */
unsigned int l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
/* select store instruction */
unsigned int l_vstore = (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT == (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT & i_xgemm_desc->flags)) ? i_micro_kernel_config->c_vmove_nts_instruction : i_micro_kernel_config->c_vmove_instruction;
libxsmm_micro_kernel_config l_micro_kernel_config_mod;
libxsmm_micro_kernel_config *i_micro_kernel_config_mod = (libxsmm_micro_kernel_config*) &l_micro_kernel_config_mod;
memcpy(i_micro_kernel_config_mod, i_micro_kernel_config, sizeof(libxsmm_micro_kernel_config));
/* @TODO fix this test */
#if !defined(NDEBUG)
if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_GENERIC ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE42 ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256 || i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CLX
|| i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX) {
if ( (i_n_blocking > 28) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 8) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX512_VL256 ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_VL256 ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (i_m_blocking != i_micro_kernel_config->vector_length) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else {}
#if 0
if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
return;
}
#endif
#endif
if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) ||
(i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CLX) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256) ) &&
( (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
const unsigned int relu_bitmask_gpr = i_gp_reg_mapping->gp_reg_help_2;
const unsigned int scratch_gpr = i_gp_reg_mapping->gp_reg_help_2;
const unsigned int aux_gpr = i_gp_reg_mapping->gp_reg_help_1;
const unsigned int aux_vreg = 1;
const unsigned int zero_vreg = 0;
/* Check out if fusion has to be applied */
if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) {
libxsmm_generator_gemm_prepare_relu_fusion( io_generated_code, i_micro_kernel_config,
zero_vreg, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, aux_gpr);
} else if (i_micro_kernel_config->fused_sigmoid == 1) {
libxsmm_generator_gemm_dump_2D_block_and_prepare_sigmoid_fusion( io_generated_code, i_micro_kernel_config_mod,
l_vec_reg_acc_start, l_m_blocking, i_n_blocking, scratch_gpr, aux_gpr);
}
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) {
unsigned int bitmask_offset = (io_generated_code->arch < LIBXSMM_X86_AVX512) ? ((l_n * i_xgemm_desc->ldc) + (l_m * 8))/8 : ((l_n * i_xgemm_desc->ldc) + (l_m * 16))/8;
libxsmm_generator_gemm_apply_relu_to_vreg( io_generated_code, i_micro_kernel_config,
zero_vreg, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), i_micro_kernel_config->fused_relu, relu_bitmask_gpr, bitmask_offset, 1, aux_gpr, aux_vreg);
} else if (i_micro_kernel_config->fused_sigmoid == 1) {
unsigned int tmp_vreg = 0;
libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( io_generated_code, i_micro_kernel_config_mod,
scratch_gpr, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), tmp_vreg );
/* Store vreg back to scratch */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMOVUPS,
scratch_gpr,
LIBXSMM_X86_GP_REG_UNDEF, 0,
(l_vec_reg_acc_start + l_m + (l_m_blocking * l_n)) * 64,
i_micro_kernel_config->vector_name,
tmp_vreg, 0, 0, 1 );
}
}
}
if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) {
libxsmm_generator_gemm_cleanup_relu_fusion( io_generated_code, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, aux_gpr);
} else if (i_micro_kernel_config->fused_sigmoid == 1) {
/* Restore accumulators from scratch */
libxsmm_generator_gemm_restore_2D_regblock_from_scratch( io_generated_code, i_micro_kernel_config,
scratch_gpr, l_vec_reg_acc_start, l_m_blocking, i_n_blocking);
libxsmm_generator_gemm_cleanup_sigmoid_fusion( io_generated_code, scratch_gpr, aux_gpr );
}
/* init stack with helper variables for SW-based RNE rounding */
/* push 0x7f800000 on the stack, naninf masking */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x7f800000);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
/* push 0x00010000 on the stack, fixup masking */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00010000);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
/* push 0x00007fff on the stack, rneadd */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00007fff);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
/* push 0x00000001 on the stack, fixup */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00000001);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
/* storing downconverted and rounded C accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
/* and with naninf */
libxsmm_x86_instruction_vec_compute_mem_2reg( io_generated_code,
LIBXSMM_X86_INSTR_VPANDD,
i_micro_kernel_config->vector_name,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF,
0,
24, 1,
reg_X,
0 );
/* and with fixup */
libxsmm_x86_instruction_vec_compute_mem_2reg( io_generated_code,
LIBXSMM_X86_INSTR_VPANDD,
i_micro_kernel_config->vector_name,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF,
0,
16, 1,
reg_X,
1 );
/* compute naninf mask k7 */
libxsmm_x86_instruction_vec_compute_mem_2reg_imm8( io_generated_code,
LIBXSMM_X86_INSTR_VPCMPD,
i_micro_kernel_config->vector_name,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF,
0,
24,
1,
0,
7,
4 );
/* compute fixup mask k6 */
libxsmm_x86_instruction_vec_compute_mem_2reg_imm8( io_generated_code,
LIBXSMM_X86_INSTR_VPCMPD,
i_micro_kernel_config->vector_name,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF,
0,
16,
1,
1,
6,
0 );
/* load rneadd */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VBROADCASTSS,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF, 0,
8,
i_micro_kernel_config->vector_name,
0, 0, 1, 0 );
/* load fixup */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VBROADCASTSS,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF, 0,
0,
i_micro_kernel_config->vector_name,
1, 0, 1, 0 );
/* compute fixup */
libxsmm_x86_instruction_vec_compute_3reg_mask( io_generated_code,
LIBXSMM_X86_INSTR_VPADDD,
i_micro_kernel_config->vector_name,
1,
0,
0,
6,
0 );
/* compute fixup */
libxsmm_x86_instruction_vec_compute_3reg_mask( io_generated_code,
LIBXSMM_X86_INSTR_VPADDD,
i_micro_kernel_config->vector_name,
0,
reg_X,
reg_X,
7,
0 );
/* shift FP32 by 16bit to right */
libxsmm_x86_instruction_vec_compute_2reg_imm8(io_generated_code,
LIBXSMM_X86_INSTR_VPSRAD_I,
i_micro_kernel_config->vector_name,
reg_X,
reg_X,
16);
/* shift FP32 by 16bit to right */
libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
LIBXSMM_X86_INSTR_VPMOVDW,
i_micro_kernel_config->vector_name,
reg_X,
0 );
/* store 16 bit values into xmm portion of the register */
if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMOVDQU16,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
( ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CLX) ) ? 'y' : 'z',
0, 2, 0, 1 );
} else {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
( ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CLX) ) ? 'x' : 'y',
0, 0, 0, 1 );
}
}
}
/* clean stack and restore help5 */
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
} else if ( ( (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT)
&& ((i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CPX) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX))
) &&
( (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) )
) {
const unsigned int relu_bitmask_gpr = i_gp_reg_mapping->gp_reg_help_2;
const unsigned int scratch_gpr = i_gp_reg_mapping->gp_reg_help_2;
const unsigned int aux_gpr = i_gp_reg_mapping->gp_reg_help_1;
const unsigned int zero_vreg = 1;
const unsigned int aux_vreg = 2;
/* storing downconverted and rounded C accumulator */
/* Check out if fusion has to be applied */
if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) {
libxsmm_generator_gemm_prepare_relu_fusion( io_generated_code, i_micro_kernel_config,
zero_vreg, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, aux_gpr);
} else if (i_micro_kernel_config->fused_sigmoid == 1) {
/* First dump the accumulators to scratch and then setup sigmoid coeffcients to be reused */
libxsmm_generator_gemm_dump_2D_block_and_prepare_sigmoid_fusion( io_generated_code, i_micro_kernel_config_mod,
l_vec_reg_acc_start, l_m_blocking, i_n_blocking, scratch_gpr, aux_gpr);
}
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
unsigned int l_m_2_blocking = (l_m_blocking/2)*2;
l_m = 0;
if ( i_micro_kernel_config->use_masking_a_c != 0 ) {
for ( l_m = 0 ; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) {
unsigned int bitmask_offset = (io_generated_code->arch < LIBXSMM_X86_AVX512) ? ((l_n * i_xgemm_desc->ldc) + (l_m * 8))/8 : ((l_n * i_xgemm_desc->ldc) + (l_m * 16))/8;
libxsmm_generator_gemm_apply_relu_to_vreg( io_generated_code, i_micro_kernel_config,
zero_vreg, reg_X, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, bitmask_offset, 1, aux_gpr, aux_vreg);
} else if (i_micro_kernel_config->fused_sigmoid == 1) {
unsigned int tmp_vreg = 0;
libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( io_generated_code, i_micro_kernel_config_mod,
scratch_gpr, reg_X, tmp_vreg );
reg_X = tmp_vreg;
}
libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
LIBXSMM_X86_INSTR_VCVTNEPS2BF16,
i_micro_kernel_config->vector_name,
reg_X, 0 );
/* store 16 bit values into ymm portion of the register bfloat mask fix can lead to errors x should not be masked */
if ( l_m == (l_m_blocking - 1) ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMOVDQU16,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX ) ? 'y' : 'z',
0, 2, 0, 1 );
} else {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX ) ? 'x' : 'y',
0, 0, 0, 1 );
}
}
} else {
for (; l_m < l_m_2_blocking; l_m+=2 ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
unsigned int reg_X2 = l_vec_reg_acc_start + l_m+1 + (l_m_blocking * l_n);
if (i_micro_kernel_config->fused_sigmoid == 1) {
unsigned int tmp_vreg = 0;
unsigned int tmp_vreg2 = 1;
libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( io_generated_code, i_micro_kernel_config_mod,
scratch_gpr, reg_X, tmp_vreg );
libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( io_generated_code, i_micro_kernel_config_mod,
scratch_gpr, reg_X2, tmp_vreg2 );
reg_X = tmp_vreg;
reg_X2 = tmp_vreg2;
}
libxsmm_x86_instruction_vec_compute_3reg( io_generated_code,
LIBXSMM_X86_INSTR_VCVTNE2PS2BF16,
i_micro_kernel_config->vector_name,
reg_X, reg_X2, 0 );
if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) {
unsigned int bitmask_offset = (io_generated_code->arch < LIBXSMM_X86_AVX512) ? ((l_n * i_xgemm_desc->ldc) + (l_m * 8))/8 : ((l_n * i_xgemm_desc->ldc) + (l_m * 16))/8;
libxsmm_generator_gemm_apply_relu_to_vreg( io_generated_code, i_micro_kernel_config,
zero_vreg, 0, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, bitmask_offset, 0, aux_gpr, aux_vreg);
}
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX ) ? 'y' : 'z',
0, 0, 0, 1 );
}
for (; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) {
unsigned int bitmask_offset = (io_generated_code->arch < LIBXSMM_X86_AVX512) ? ((l_n * i_xgemm_desc->ldc) + (l_m * 8))/8 : ((l_n * i_xgemm_desc->ldc) + (l_m * 16))/8;
libxsmm_generator_gemm_apply_relu_to_vreg( io_generated_code, i_micro_kernel_config,
zero_vreg, reg_X, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, bitmask_offset, 1, aux_gpr, aux_vreg);
} else if (i_micro_kernel_config->fused_sigmoid == 1) {
unsigned int tmp_vreg = 0;
libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( io_generated_code, i_micro_kernel_config_mod,
scratch_gpr, reg_X, tmp_vreg );
reg_X = tmp_vreg;
}
libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
LIBXSMM_X86_INSTR_VCVTNEPS2BF16,
i_micro_kernel_config->vector_name,
reg_X, 0 );
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX ) ? 'x' : 'y',
0, 0, 0, 1 );
}
}
}
if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) {
libxsmm_generator_gemm_cleanup_relu_fusion( io_generated_code, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, aux_gpr);
} else if (i_micro_kernel_config->fused_sigmoid == 1) {
libxsmm_generator_gemm_cleanup_sigmoid_fusion( io_generated_code, scratch_gpr, aux_gpr );
}
} else if ( ( (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) && (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_VL256) ) &&
( (LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* pick the right instrucitons */
unsigned int inst_f32_i32 = ( ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED ) != 0 ) ? LIBXSMM_X86_INSTR_VCVTPS2UDQ : LIBXSMM_X86_INSTR_VCVTPS2DQ;
unsigned int inst_i32_i8 = ( ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED ) != 0 ) ? LIBXSMM_X86_INSTR_VPMOVUSDB : LIBXSMM_X86_INSTR_VPMOVSDB;
/* there are case where we need to load the scaling factor's address from the stack argument list */
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) != 0 ) {
libxsmm_x86_instruction_load_arg_to_reg( io_generated_code, 0, i_gp_reg_mapping->gp_reg_scf );
}
/* loading scf into register 3 */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VBROADCASTSS,
i_gp_reg_mapping->gp_reg_scf,
LIBXSMM_X86_GP_REG_UNDEF, 0, 0,
i_micro_kernel_config->vector_name,
3, 0, 1, 0 );
/* Zero out register 0 to perform relu */
libxsmm_x86_instruction_vec_compute_3reg( io_generated_code,
i_micro_kernel_config->vxor_instruction,
i_micro_kernel_config->vector_name,
0,
0,
0);
/* storing downconverted and rounded C accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
/* Convert result to F32 */
libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
LIBXSMM_X86_INSTR_VCVTDQ2PS,
i_micro_kernel_config->vector_name,
reg_X,
reg_X );
/* Multiply with scaling factor */
libxsmm_x86_instruction_vec_compute_3reg( io_generated_code,
LIBXSMM_X86_INSTR_VMULPS,
i_micro_kernel_config->vector_name,
reg_X,
3,
reg_X );
/* Perform RELU */
libxsmm_x86_instruction_vec_compute_3reg( io_generated_code,
LIBXSMM_X86_INSTR_VMAXPS,
i_micro_kernel_config->vector_name,
reg_X,
0,
reg_X);
/* Round result to int32 */
libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
inst_f32_i32,
i_micro_kernel_config->vector_name,
reg_X, reg_X );
/* down-convert to int8 */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
inst_i32_i8,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
i_micro_kernel_config->vector_name,
reg_X, ( ( l_m == (l_m_blocking - 1)) && ( i_micro_kernel_config->use_masking_a_c != 0 ) ) ? 2 : 0, 0, 1 );
}
}
} else {
/* storing C accumulator */
const unsigned int relu_bitmask_gpr = i_gp_reg_mapping->gp_reg_help_2;
const unsigned int scratch_gpr = i_gp_reg_mapping->gp_reg_help_2;
const unsigned int aux_gpr = i_gp_reg_mapping->gp_reg_help_1;
const unsigned int zero_vreg = 0;
const unsigned int aux_vreg = 1;
/* Check out if fusion has to be applied */
if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) {
libxsmm_generator_gemm_prepare_relu_fusion( io_generated_code, i_micro_kernel_config,
zero_vreg, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, aux_gpr);
} else if (i_micro_kernel_config->fused_sigmoid == 1) {
libxsmm_generator_gemm_dump_2D_block_and_prepare_sigmoid_fusion( io_generated_code, i_micro_kernel_config_mod,
l_vec_reg_acc_start, l_m_blocking, i_n_blocking, scratch_gpr, aux_gpr);
}
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) {
unsigned int bitmask_offset = (io_generated_code->arch < LIBXSMM_X86_AVX512) ? ((l_n * i_xgemm_desc->ldc) + (l_m * 8))/8 : ((l_n * i_xgemm_desc->ldc) + (l_m * 16))/8;
libxsmm_generator_gemm_apply_relu_to_vreg( io_generated_code, i_micro_kernel_config,
zero_vreg, reg_X, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, bitmask_offset, 1, aux_gpr, aux_vreg);
} else if (i_micro_kernel_config->fused_sigmoid == 1) {
unsigned int tmp_vreg = 0;
libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( io_generated_code, i_micro_kernel_config_mod,
scratch_gpr, reg_X, tmp_vreg );
reg_X = tmp_vreg;
}
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
i_micro_kernel_config->vector_name,
reg_X, ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 0, 1 );
}
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
/* determining how many prefetches we need in M direction as we just need one prefetch per cache line */
unsigned int l_m_advance = 64 / ((i_micro_kernel_config->vector_length) * (i_micro_kernel_config->datatype_size_out)); /* 64: hardcoded cache line length */
for (l_m = 0; l_m < l_m_blocking; l_m += l_m_advance ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_b_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out));
}
}
}
}
if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) {
libxsmm_generator_gemm_cleanup_relu_fusion( io_generated_code, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, aux_gpr );
} else if (i_micro_kernel_config->fused_sigmoid == 1) {
libxsmm_generator_gemm_cleanup_sigmoid_fusion( io_generated_code, scratch_gpr, aux_gpr );
}
}
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_initialize_avx512_mask( libxsmm_generated_code*        io_generated_code,
                                                    const unsigned int             i_gp_reg_tmp,
                                                    const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                    const unsigned int             i_mask_count ) {
  /* Emits code that materializes the remainder masks for masked C accesses:
   * the full-vector mask is shifted right by i_mask_count and loaded into
   * LIBXSMM_X86_AVX512_MASK; BF16/I8 kernels additionally receive a wider
   * mask in mask register 2. */
  const unsigned int l_is_f64 = ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ? 1 : 0;
  const unsigned int l_is_vl256 = ( (io_generated_code->arch == LIBXSMM_X86_AVX512_VL256) ||
                                    (io_generated_code->arch == LIBXSMM_X86_AVX512_VL256_CPX) ||
                                    (io_generated_code->arch == LIBXSMM_X86_AVX512_VL256_CLX) ) ? 1 : 0;
  unsigned int l_mask;

  /* init full mask: 0xf (f64 @ 256 bit), 0xff (f64 @ 512 bit or f32 @ 256 bit), 0xffff (otherwise) */
  if ( (l_is_f64 != 0) && (l_is_vl256 != 0) ) {
    l_mask = 0xf;
  } else if ( (l_is_f64 != 0) || (l_is_vl256 != 0) ) {
    l_mask = 0xff;
  } else {
    l_mask = 0xffff;
  }
  /* shift right by "inverse" remainder */
  l_mask = l_mask >> i_mask_count;

  /* move mask to GP register */
  libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ,
                                   i_gp_reg_tmp, l_mask );

  if ( ( io_generated_code->arch >= LIBXSMM_X86_AVX512_VL256 ) && ( io_generated_code->arch <= LIBXSMM_X86_ALLFEAT ) ) {
    libxsmm_x86_instruction_mask_move( io_generated_code, LIBXSMM_X86_INSTR_KMOVW_GPR_LD,
                                       i_gp_reg_tmp, LIBXSMM_X86_AVX512_MASK );
    if ( ( LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && ( LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) {
      /* BF16 kernels: load a 32-bit (KMOVD) mask into mask register 2 */
      libxsmm_x86_instruction_mask_move( io_generated_code, LIBXSMM_X86_INSTR_KMOVD_GPR_LD,
                                         i_gp_reg_tmp, 2 );
    } else if ( ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) {
      /* I8 kernels: load a 64-bit (KMOVQ) mask into mask register 2 */
      libxsmm_x86_instruction_mask_move( io_generated_code, LIBXSMM_X86_INSTR_KMOVQ_GPR_LD,
                                         i_gp_reg_tmp, 2 );
    } else {
      /* no additional mask is needed */
    }
  } else {
    /* shouldn't happen */
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH );
    return;
  }
}
|
debug_test_system.h | // ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2013, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>
// ==========================================================================
// The SeqAn testing infrastructure. Based on ideas from the OpenMS
// "ClassTest.h".
// ==========================================================================
// TODO(holtgrew): This could use some cleanup.
// SEQAN_NO_GENERATED_FORWARDS
#ifndef SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
#define SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
#include <iostream> // stdout, stderr
#include <iomanip>
#include <cstring> // strrpos
#include <cstdlib> // exit()
#include <cstdio>
#include <cstdarg> // va_start, va_list, va_end
#include <set>
#include <vector>
#include <string>
#ifdef PLATFORM_WINDOWS
#include <Windows.h> // DeleteFile()
#else // #ifdef PLATFORM_WINDOWS
#include <unistd.h> // unlink()
#include <sys/stat.h> // mkdir()
#include <dirent.h> // DIR
#if SEQAN_HAS_EXECINFO
#include <execinfo.h> // backtrace(), backtrace_symbols()
#endif // #if SEQAN_HAS_EXECINFO
#include <cxxabi.h> // __cxa_demangle()
#include <signal.h>
#endif // #ifdef PLATFORM_WINDOWS
/**
.Macro.SEQAN_FAIL
..cat:Assertions
..summary:Force abortion of program, regardless of debugging settings.
..signature:SEQAN_FAIL(msg[, args])
..param.msg:A format string.
..param.args:An optional list of arguments.
..remarks:Use this if something really unexpected happens inside your functions and there is no way to report this through the API. A good example would be logic errors, e.g. invalid values.
..example.text:In the following example, the $SEQAN_FAIL$ is there if a possible value is added to $MyEnum$ but the function $foo$ is not updated accordingly.
..example.code:
enum MyEnum {
VALUE_ONE,
VALUE_TWO
};
bool foo(MyEnum x) {
switch (x) {
case VALUE_ONE:
// do something
return true;
case VALUE_TWO:
// do something
return true;
}
SEQAN_FAIL("Logic error. Should never reach here. x == %d.", x);
return false;
}
..include:seqan/basic.h
..see:Macro.SEQAN_CHECK
*/
// Print a printf-style failure message with source location via forceFail(),
// then abort (or throw, in testing mode) via ClassTest::fail().
#define SEQAN_FAIL(...)                                                 \
    do {                                                                \
        ::seqan::ClassTest::forceFail(__FILE__, __LINE__,               \
                                      __VA_ARGS__);                     \
        ::seqan::ClassTest::fail();                                     \
    } while (false)
/**
.Macro.SEQAN_CHECK
..cat:Assertions
..summary:Force abortion of program if a condition is not met, regardless of debugging settings.
..signature:SEQAN_CHECK(condition, msg[, args])
..param.msg:A format string.
..param.args:An optional list of arguments.
..remarks:Use this if something really unexpected happens inside your functions and there is no way to report this through the API. A good example would be logic errors, e.g. invalid values.
..example.text:In the following example, the $SEQAN_CHECK$ stops program execution if a value is added to $MyEnum$ but the function $foo$ is not updated accordingly.
..example.code:
enum MyEnum {
VALUE_ONE,
VALUE_TWO
};
bool foo(MyEnum x) {
SEQAN_CHECK((x == VALUE_ONE || x == VALUE_TWO), "Invalid value for x == %d.", x);
switch (x) {
case VALUE_ONE:
// do something
return true;
case VALUE_TWO:
// do something
return true;
}
return false; // Should never reach here, checked above with SEQAN_CHECK.
}
..include:seqan/basic.h
..see:Macro.SEQAN_FAIL
*/
// Evaluate _arg1; on failure, print the stringified condition plus a
// printf-style message and stop via ClassTest::fail(). Unlike the
// SEQAN_ASSERT_* macros this is active regardless of debug settings.
#define SEQAN_CHECK(_arg1, ...)                                         \
    do {                                                                \
        if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__,           \
                                          (_arg1), # _arg1,             \
                                          __VA_ARGS__)) {               \
            ::seqan::ClassTest::fail();                                 \
        }                                                               \
    } while (false)
// SeqAn's has three global debug/testing levels: testing, debug and
// release. Depending on the level, the SEQAN_ASSERT_* and
// SEQAN_CHECKPOINT macros will be enabled.
//
// Note that this is independent of the <cassert> assertions and
// NDEBUG being defined.
//
// The levels are enabled by the values of the macros
// SEQAN_ENABLE_TESTING and SEQAN_ENABLE_DEBUG. By setting a macro to
// 0, one disables the level and by setting the macro to 1, one
// enables a level. Enabling testing also enables debug, overriding a
// value of 0 for SEQAN_ENABLE_DEBUG.
//
// If the level is release (both the macros for debug and testing are
// 0), the assertions will be disabled. If the level is debug then
// the assertions will be enabled. If the level is testing then the
// checkpoint macros will also be enabled.
//
// The default is to enable debugging but disable testing.
//
// You can print the current level using the function seqan::printDebugLevel().
/**
.Macro.SEQAN_ENABLE_TESTING
..cat:Testing & Debugging
..summary:Indicates whether testing is enabled.
..signature:SEQAN_ENABLE_DEBUG
..remarks:When enabled (set to 1), testing is enabled. This means the macros for the tests (@Macro.SEQAN_BEGIN_TESTSUITE@, @Macro.SEQAN_DEFINE_TEST@, @Macro.SEQAN_CALL_TEST@, and @Macro.SEQAN_END_TESTSUITE@) will be enabled. This makes failing assertions raise exceptions instead of call $abort()$ and enables checkpoints.
..remarks:By default, this is set to 0.
..remarks:If @Macro.SEQAN_ENABLE_CHECKPOINTS@ is not defined before including $<seqan/basic.h>$, then @Macro.SEQAN_ENABLE_CHECKPOINTS@ will be set to the value of @Macro.SEQAN_ENABLE_TESTING@ (after the default initialization to 0).
..remarks:If you want to change this value, you have to define this value before including any SeqAn header.
..remarks:If set to 1 then @Macro.SEQAN_ENABLE_TESTING@ is force-set to 0 as well.
..see:Macro.SEQAN_ENABLE_DEBUG
..see:Macro.SEQAN_ENABLE_CHECKPOINTS
*/
// Set default for SEQAN_ENABLE_TESTING.
// Testing is off unless the build system (or the user, before including any
// SeqAn header) defines it.
#ifndef SEQAN_ENABLE_TESTING
#define SEQAN_ENABLE_TESTING 0
#endif  // #ifndef SEQAN_ENABLE_TESTING
/**
.Macro.SEQAN_ENABLE_DEBUG
..cat:Testing & Debugging
..summary:Indicates whether debugging is enabled.
..signature:SEQAN_ENABLE_DEBUG
..remarks:When enabled (set to 1), debugging is enabled. This means the assertion macros are expanded to actual code and not to nothing.
..remarks:By default, this is set to 0 if $NDEBUG$ is defined and to 1 if $NDEBUG$ is not defined.
..remarks:If you want to change this value, you have to define this value before including any SeqAn header.
..remarks:Force-enabled if @Macro.SEQAN_ENABLE_TESTING@ is set to 1.
..see:Macro.SEQAN_ENABLE_TESTING
..see:Macro.SEQAN_ENABLE_CHECKPOINTS
*/
// Set default for SEQAN_ENABLE_DEBUG.
// Defaults to the inverse of NDEBUG: debug mode unless a release build was
// requested.
#ifndef SEQAN_ENABLE_DEBUG
#ifdef NDEBUG
#define SEQAN_ENABLE_DEBUG 0
#else  // #ifdef NDEBUG
#define SEQAN_ENABLE_DEBUG 1
#endif  // #ifdef NDEBUG
#endif  // #ifndef SEQAN_ENABLE_DEBUG

// Force-enable debugging if testing is enabled (testing implies debug,
// overriding an explicit SEQAN_ENABLE_DEBUG == 0).
#if SEQAN_ENABLE_TESTING
#undef SEQAN_ENABLE_DEBUG
#define SEQAN_ENABLE_DEBUG 1
#endif  // #if SEQAN_ENABLE_TESTING
/**
.Macro.SEQAN_ENABLE_CHECKPOINTS
..cat:Testing & Debugging
..summary:Indicates whether checkpoints are enabled.
..signature:SEQAN_ENABLE_CHECKPOINTS
..remarks:When enabled (set to 1), checkpoints are enabled. This means the $SEQAN_CHECKPOINT$ macros are expanded to actual code and not to nothing.
..remarks:By default, this is set to $SEQAN_ENABLE_TESTING$.
..remarks:Checkpoints can come at large increases of running time in your tests. Disable them when your test run too slow.
..remarks:If you want to change this value, you have to define this value before including any SeqAn header.
..example.text:Disable checkpoints in a program.
..example.code:
// Disable SeqAn checkpoints in this program.
#define SEQAN_ENABLE_CHECKPOINTS 0
// Any SeqAn headers or headers including SeqAn headers have to come AFTER the
// definition of SEQAN_ENABLE_CHECKPOINT above.
#include <seqan/base.h>
int main(int argc, char const ** argv)
{
// Any call to SeqAn functions will NOT log any checkpoints.
return 0;
}
..see:Macro.SEQAN_ENABLE_DEBUG
..see:Macro.SEQAN_ENABLE_TESTING
*/
// Allow disabling checkpoints independent of testing.
// NOTE(review): the trailing commented-out value suggests this default was
// once coupled to SEQAN_ENABLE_TESTING; today checkpoints are simply off
// unless explicitly enabled.
#ifndef SEQAN_ENABLE_CHECKPOINTS
#define SEQAN_ENABLE_CHECKPOINTS 0  // SEQAN_ENABLE_TESTING
#endif  // #ifndef SEQAN_ENABLE_CHECKPOINTS
/**
.Macro.SEQAN_TYPEDEF_FOR_DEBUG
..cat:Testing & Debugging
..summary: When using typedefs that are only used in debug mode then they have to be marked with macro.
..signature:SEQAN_TYPEDEF_FOR_DEBUG
..example.code:
typedef int TInt SEQAN_TYPEDEF_FOR_DEBUG;
*/
// SEQAN_TYPEDEF_FOR_DEBUG marks typedefs that are only referenced in debug
// code; on GCC >= 4.7 it expands to __attribute__((unused)) to silence
// "unused local typedef" warnings in release builds.
#if !SEQAN_ENABLE_DEBUG
#  if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)))
#    define SEQAN_TYPEDEF_FOR_DEBUG __attribute__((unused))
#  else
#    define SEQAN_TYPEDEF_FOR_DEBUG
#  endif
#else
#  define SEQAN_TYPEDEF_FOR_DEBUG
#endif

// TODO(holtgrew): This one is for profiling and in tests.
// Same unused-suppression, but unconditionally (independent of debug level).
#if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)))
#  define SEQAN_UNUSED_TYPEDEF __attribute__((unused))
#else
#  define SEQAN_UNUSED_TYPEDEF
#endif
namespace seqan {
// SEQAN_CXX_FLAGS_ contains the compiler flags, SEQAN_CXX_FLAGS is a string
// literal with this value.
#if !defined(SEQAN_CXX_FLAGS_)
#define SEQAN_CXX_FLAGS_ SEQAN_CXX_FLAGS_NOT_SET
#endif // !defined(SEQAN_CXX_FLAGS__)
// Two-step stringification so the macro's VALUE (not its name) gets quoted.
#define SEQAN_MKSTRING_(str) # str
#define SEQAN_MKSTRING(str) SEQAN_MKSTRING_(str)
#define SEQAN_CXX_FLAGS SEQAN_MKSTRING(SEQAN_CXX_FLAGS_)
//#undef SEQAN_MKSTRING
//#undef SEQAN_MKSTRING_
/**
.Function.printDebugLevel
..cat:Testing & Debugging
..summary:Print the current SeqAn debug level and the compiler flags to the given stream.
..signature:printDebugLevel(stream)
..param.stream:The stream to print to, e.g. $std::cout$.
..include:seqan/basic.h
*/
// Write the compiled-in SeqAn debug/testing configuration (debug, testing,
// checkpoint switches and the recorded compiler flags) to the given stream.
template <typename TStream>
void printDebugLevel(TStream & stream)
{
    stream << "SEQAN_ENABLE_DEBUG == " << SEQAN_ENABLE_DEBUG << std::endl
           << "SEQAN_ENABLE_TESTING == " << SEQAN_ENABLE_TESTING << std::endl
           << "SEQAN_ENABLE_CHECKPOINTS == " << SEQAN_ENABLE_CHECKPOINTS << std::endl
           << "SEQAN_CXX_FLAGS == \"" << SEQAN_CXX_FLAGS << "\"" << std::endl;
}
#if defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO
// On Windows or without execinfo support, stack trace printing degrades to
// a no-op so callers don't need platform #ifdefs.
template <typename TSize>
void printStackTrace(TSize /*maxFrames*/)
{}
#else
// print a demangled stack backtrace of the caller function
//
// Collects up to maxFrames return addresses with backtrace(), resolves them
// with backtrace_symbols(), and tries to demangle each symbol with
// abi::__cxa_demangle(). The sscanf() patterns below recognize the glibc
// and Mac OS X output formats of backtrace_symbols(); anything else is
// printed verbatim.
// NOTE(review): addrlist holds 256 entries but maxFrames is passed through
// unchecked — callers must keep maxFrames <= 256 (confirm call sites).
template <typename TSize>
void printStackTrace(TSize maxFrames)
{
    void * addrlist[256];
    char temp[4096];
    char addr[20];
    char offset[20];

    size_t size;
    int status;
    char * symname;
    char * demangled;

    std::cerr << std::endl << "stack trace:" << std::endl;

    int addrlist_len = backtrace(addrlist, maxFrames);
    char ** symbollist = backtrace_symbols(addrlist, addrlist_len);
    // Start at 1: entry 0 is printStackTrace() itself.
    for (int i = 1; i < addrlist_len; ++i)
    {
        offset[0] = 0;
        addr[0] = 0;
        demangled = NULL;

        // LINUX FORMAT:
        //     ./sam2svg [0x473b8c]
        //     /lib/libc.so.6 [0x7f40d2526f60]
        //     ./sam2svg(_Z2f3v+0x10) [0x47200c]
        //     ./sam2svg(_Z2f2v+0xd) [0x472021]
        //     ./sam2svg(main+0x1367) [0x4735fc]
        //     /lib/libc.so.6(__libc_start_main+0xe6) [0x7f40d25131a6]
        //
        // "binary(symbol+offset) [addr]" — extract symbol, offset and addr.
        if (3 == sscanf(symbollist[i], "%*[^(](%4095[^+]+%[^)]) %s", temp, offset, addr))
        {
            symname = temp;
            // Prefer the demangled name when __cxa_demangle succeeds.
            if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status)))
            {
                symname = demangled;
            }
        }
        // MAC OS X FORMAT:
        //    1   sam2svg    0x0000000100003a39 _ZN5seqanL28signalHandlerPrintStackTraceEi + 21
        //    2   libSystem.B.dylib    0x00007fff87a6d67a _sigtramp + 26
        //    3   libSystem.B.dylib    0x00007fff87a76df7 tiny_free_do_recirc_to_depot + 980
        //    4   sam2svg    0x00000001000021b9 _Z2f2v + 9
        //    5   sam2svg    0x00000001000034b1 main + 4546
        //    6   sam2svg    0x0000000100002190 start + 52
        else if (3 == sscanf(symbollist[i], "%*d %*s %s %s %*s %s", addr, temp, offset))
        {
            symname = temp;
            if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status)))
            {
                symname = demangled;
            }
        }
        // LINUX FORMAT (no symbol available):
        //     ./sam2svg [0x473b8c]
        //     /lib/libc.so.6 [0x7f40d2526f60]
        else if (2 == sscanf(symbollist[i], "%s %s", temp, addr))
        {
            symname = temp;
        }
        // DEFAULT: print the raw line from backtrace_symbols().
        else
        {
            symname = symbollist[i];
        }
        // One frame per line: "  N  <addr>  <symbol> [+ offset]".
        std::cerr << std::setw(3) << i - 1;
        std::cerr << std::setw(20) << addr;
        std::cerr << " " << symname;
        if (offset[0] != 0)
            std::cerr << " + " << offset;
        std::cerr << std::endl;
        // free(NULL) is a no-op, so this is safe when demangling failed.
        free(demangled);
    }
    std::cerr << std::endl;
    // Only the array must be freed according to man page, not the contents.
    free(symbollist);
}
// Signal handler: print a stack trace (up to 20 frames), restore the default
// handler for this signal, then re-raise it so the process still terminates
// with the original signal's semantics (core dump, exit status).
static void signalHandlerPrintStackTrace(int signum)
{
    std::cerr << std::endl;
    printStackTrace(20);
    signal(signum, SIG_DFL);
    kill(getpid(), signum);
}
// Install the stack-trace handler for fatal signals. Returns 0 so the call
// can be used as a static initializer (see SignalHandlersDummy_ below in
// this header).
inline int _deploySignalHandlers()
{
    signal(SIGSEGV, signalHandlerPrintStackTrace);      // segfault
    signal(SIGFPE, signalHandlerPrintStackTrace);       // divide by zero
    // ...
    return 0;
}
#if SEQAN_ENABLE_DEBUG
// automatically deploy signal handlers that output the stack trace on a trap (in debug mode)
//
// Trick: the dynamic initialization of the static template member runs
// _deploySignalHandlers() exactly once per binary including this header.
template <typename T>
struct SignalHandlersDummy_
{
    static const int i;
};

template <typename T>
const int SignalHandlersDummy_<T>::i = _deploySignalHandlers();

namespace {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-variable"
#endif  // ifdef __clang__
// Odr-use the member so its initializer (the handler installation) is not
// dropped; volatile keeps the read from being optimized away.
volatile int signalHandlersDummy_ = SignalHandlersDummy_<void>::i;
#ifdef __clang__
#pragma clang diagnostic pop
#endif  // ifdef __clang__
}
#endif // #if SEQAN_ENABLE_DEBUG
#endif // defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO
// Namespace for the testing infrastructure.
//
// This namespace contains the variables and functions that are used
// in the macros below to perform the tests.
namespace ClassTest {
// Raised when an assertion fails in test mode.
// (Per the SEQAN_ENABLE_TESTING documentation above, testing mode makes
// failing assertions raise exceptions instead of calling abort().)
struct AssertionFailedException {};
// Container for static global data for the tests.
//
// All state lives in function-local statics behind reference-returning
// accessors, so there is exactly one instance per process and no global
// initialization-order issues.
struct StaticData
{
    // Number of tests that were run.
    static int & testCount()
    {
        static int result = 0;
        return result;
    }

    // Number of errors that occurred.
    static int & errorCount()
    {
        static int result = 0;
        return result;
    }

    // Number of skipped tests.
    static int & skippedCount()
    {
        static int result = 0;
        return result;
    }

    // Flag whether there was an error in this test.
    static bool & thisTestOk()
    {
        static bool result = false;  // was "= 0": use the bool literal
        return result;
    }

    // Flag whether this test was skipped.
    static bool & thisTestSkipped()
    {
        static bool result = false;  // was "= 0": use the bool literal
        return result;
    }

    // Name of the current test.
    static const char * & currentTestName()
    {
        // No const_cast needed here: the target type is const char * already.
        static const char * result = "";
        return result;
    }

    // Base path to the binary. Extrapolated from __FILE__.
    static char * & basePath()
    {
        // The const_cast is required because the default points at a string
        // literal while beginTestSuite() installs a heap-allocated string.
        // endTestSuite() delete[]s this pointer, so it must only run after
        // a beginTestSuite() call replaced the default.
        const char * defaultValue = ".";
        static char * result = const_cast<char *>(defaultValue);
        return result;
    }

    // Base path to the directory containing "core" and "extras."
    // Extrapolated from __FILE__.
    static char * & pathToRoot()
    {
        // Same literal-default / heap-replacement pattern as basePath().
        const char * defaultValue = ".";
        static char * result = const_cast<char *>(defaultValue);
        return result;
    }

    // Total number of checkpoints in header file.
    static int & totalCheckPointCount()
    {
        static int result = 0;
        return result;
    }

    // Total number of checkpoints found in binary files.
    static int & foundCheckPointCount()
    {
        static int result = 0;
        return result;
    }

    // Names of temporary files as returned by tempFileName. This
    // global state is used to remove any existing such files
    // after completing the testsuite.
    static ::std::vector<std::string> & tempFileNames()
    {
        static ::std::vector<std::string> filenames;
        return filenames;
    }
};
// Open a temporary file, unlink it, return posix handle. Note: This has not been tested yet.
// TODO(holtgrew): Not used yet and Windows code does not work.
/*
inline
int openTempFile() {
#ifdef PLATFORM_WINDOWS
char * fileName = _tempnam(NULL, "SQN");
if (!fileName) {
::std::cerr << "Cannot create a unique temporary filename" << ::std::endl;
exit(1);
}
int result = open(fileName, _O_RDWR | OPEN_TEMPORARY);
free(fileName);
return result;
#else // A Unix...
char filenameBuffer[100];
strcpy(filenameBuffer, "/tmp/SEQANXXXXXXXXXX");
int result = mkstemp(filenameBuffer);
unlink(filenameBuffer);
return result;
#endif // ifdef PLATFORM_WINDOWS
}
*/
// Return the path to a temporary file, in a static buffer in this
// function. This is not thread safe!
// NOTE(review): every call reuses the same static buffer, so a second call
// invalidates the previously returned pointer. The function actually
// creates a unique temporary DIRECTORY, records it for cleanup in
// endTestSuite(), and returns the path of a "test_file" inside it.
inline
const char * tempFileName()
{
    //IOREV _duplicate_ overlaps with some stuff in system/file_sync.h, should be moved to io-module
    static char fileNameBuffer[1000];
#ifdef PLATFORM_WINDOWS_VS
    static char filePathBuffer[1000];
    // Gets the temp path env string (no guarantee it's a valid path).
    DWORD dwRetVal = 0;
    dwRetVal = GetTempPath(1000,            // length of the buffer
                           filePathBuffer); // buffer for path
    if (dwRetVal > 1000 || (dwRetVal == 0))
    {
        std::cerr << "GetTempPath failed" << std::endl;
        exit(1);
    }
    UINT uRetVal = 0;
    uRetVal = GetTempFileName(filePathBuffer,  // directory for tmp files
                              TEXT("SEQAN."),  // temp file name prefix
                              0,               // create unique name
                              fileNameBuffer); // buffer for name
    if (uRetVal == 0)
    {
        std::cerr << "GetTempFileName failed" << std::endl;
        exit(1);
    }
    // GetTempFileName created a file; replace it with a directory of the
    // same (unique) name.
    DeleteFile(fileNameBuffer);
    CreateDirectoryA(fileNameBuffer, NULL);
    // Remember the directory so endTestSuite() can remove it again.
    StaticData::tempFileNames().push_back(fileNameBuffer);
    strcat(fileNameBuffer, "\\test_file");
    return fileNameBuffer;
#else  // ifdef PLATFORM_WINDOWS_VS
    strcpy(fileNameBuffer, "/tmp/SEQAN.XXXXXXXXXXXXXXXXXXXX");
#ifdef PLATFORM_WINDOWS_MINGW
    // There is no mkstemp in MinGW but it does not complain about tmpnam.
    tmpnam(fileNameBuffer);
#else  // ifdef PLATFORM_WINDOWS_MINGW
    // mkstemp() creates the file; unlink it and create a directory of the
    // same unique name instead. The handle is only captured to silence
    // "unused result" warnings.
    int _tmp = mkstemp(fileNameBuffer);
    (void) _tmp;
    unlink(fileNameBuffer);
    mkdir(fileNameBuffer, 0777);
    // Remember the directory so endTestSuite() can remove it again.
    StaticData::tempFileNames().push_back(fileNameBuffer);
    strcat(fileNameBuffer, "/test_file");
#endif  // #ifdef PLATFORM_WINDOWS_MINGW
    return fileNameBuffer;
#endif  // ifdef PLATFORM_WINDOWS_VS
}
// Initialize the testing infrastructure.
//
// Used through SEQAN_BEGIN_TESTSUITE(test_name)
//
// Prints the suite banner and debug level, resets all counters, extracts the
// directory part of argv0 into StaticData::basePath() and extrapolates the
// repository root (the directory containing "core") from __FILE__ into
// StaticData::pathToRoot(). Exits with status 1 if the root cannot be found.
inline
void beginTestSuite(const char * testSuiteName, const char * argv0)
{
    // First things first: Print test suite name and current debug level.
    std::cout << "TEST SUITE " << testSuiteName << std::endl;
    printDebugLevel(std::cout);
    StaticData::testCount() = 0;
    StaticData::skippedCount() = 0;
    StaticData::errorCount() = 0;
    StaticData::totalCheckPointCount() = 0;
    StaticData::foundCheckPointCount() = 0;
    // Get path to argv0: locate the last path separator. On Windows, we can
    // have both \ and /.
    // (Fix: the previous std::min(strchr(argv0,'\\'), strchr(argv0,'/'))
    // returned NULL as soon as one of the two separators did not occur at
    // all, so the directory part was lost on paths using only one kind of
    // separator.)
    const char * end = argv0;
    for (const char * ptr = argv0; *ptr != '\0'; ++ptr)
    {
        if (*ptr == '/' || *ptr == '\\')
            end = ptr;
    }
    int rpos = end - argv0;
    if (rpos <= 0)
    {
        // No directory part: the binary was invoked by bare name.
        StaticData::basePath() = new char[2];
        strcpy(StaticData::basePath(), ".");
    }
    else
    {
        // Fix: the previous code allocated len bytes and strncpy'd exactly
        // len characters, never appending the terminating NUL.
        int len = rpos;
        StaticData::basePath() = new char[len + 1];
        memcpy(StaticData::basePath(), argv0, len);
        StaticData::basePath()[len] = '\0';
    }

    // Get path to projects: find the LAST occurrence of "core" in __FILE__,
    // then back up to just after the preceding path separator.
    // (Fix: the loop bound "i < strlen(file) - strlen(core)" underflowed for
    // very short paths and missed a trailing match.)
    const char * file = __FILE__;
    int pos = -1;
    for (size_t i = 0; i + strlen("core") <= strlen(file); ++i)
    {
        if (strncmp(file + i, "core", strlen("core")) == 0)
        {
            pos = i;
        }
    }
    for (; pos > 0 && *(file + pos - 1) != '/' && *(file + pos - 1) != '\\'; --pos)
        continue;
    if (pos == -1)
    {
        std::cerr << "Could not extrapolate path to repository from __FILE__ == \""
                  << __FILE__ << "\"" << std::endl;
        exit(1);
    }
    if (pos == 0)
    {
        // __FILE__ is relative and starts with "core": the root is ".".
        // (Fix: this case previously wrote to pathToRoot()[-1].)
        StaticData::pathToRoot() = new char[2];
        strcpy(StaticData::pathToRoot(), ".");
    }
    else
    {
        // Keep file[0 .. pos-2], i.e. drop the trailing separator.
        StaticData::pathToRoot() = new char[pos];
        memcpy(StaticData::pathToRoot(), file, pos - 1);
        StaticData::pathToRoot()[pos - 1] = '\0';
    }
#ifdef PLATFORM_WINDOWS_VS
    // Set CRT reporting such that everything goes to stderr and there are
    // no popups causing timeouts.
    _set_error_mode(_OUT_TO_STDERR);
    _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
    _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
    _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
#endif  // PLATFORM_WINDOWS_VS
}
// Run test suite finalization.
//
// Used through SEQAN_END_TESTSUITE
//
// Prints a bottom banner with the error count and returns the
// program's return code.
inline
int endTestSuite()
{
delete[] StaticData::basePath();
delete[] StaticData::pathToRoot();
std::cout << "**************************************" << std::endl;
std::cout << " Total Check Points : " << StaticData::totalCheckPointCount() << std::endl;
std::cout << " Found Check Points : " << StaticData::foundCheckPointCount() << std::endl;
std::cout << " Lost Check Points : " << StaticData::totalCheckPointCount() - StaticData::foundCheckPointCount() << std::endl;
std::cout << "--------------------------------------" << std::endl;
std::cout << " Total Tests: " << StaticData::testCount() << std::endl;
std::cout << " Skipped: " << StaticData::skippedCount() << std::endl;
std::cout << " Errors: " << StaticData::errorCount() << std::endl;
std::cout << "**************************************" << std::endl;
// TODO(holtgrew): Re-enable that all check points have to be found for the test to return 1;
/*
if (StaticData::totalCheckPointCount() != StaticData::foundCheckPointCount())
return 1;
*/
// Delete all temporary files that still exist.
for (unsigned i = 0; i < StaticData::tempFileNames().size(); ++i)
{
#ifdef PLATFORM_WINDOWS
HANDLE hFind;
WIN32_FIND_DATA data;
std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("\\*");
hFind = FindFirstFile(temp.c_str(), &data);
if (hFind != INVALID_HANDLE_VALUE)
{
do
{
std::string tempp = StaticData::tempFileNames()[i].c_str() + std::string("\\") + data.cFileName;
DeleteFile(tempp.c_str());
}
while (FindNextFile(hFind, &data));
FindClose(hFind);
}
RemoveDirectory(StaticData::tempFileNames()[i].c_str());
#else // #ifdef PLATFORM_WINDOWS
DIR * dpdf;
struct dirent * epdf;
dpdf = opendir(StaticData::tempFileNames()[i].c_str());
if (dpdf != NULL)
{
while ((epdf = readdir(dpdf)) != NULL)
{
std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("/") + std::string(epdf->d_name);
unlink(temp.c_str());
}
}
rmdir(StaticData::tempFileNames()[i].c_str());
#endif // #ifdef PLATFORM_WINDOWS
}
if (StaticData::errorCount() != 0)
return 1;
return 0;
}
// Run test initialization: register one more executed test and reset the
// per-test state (name, ok flag, skipped flag) for testName.
inline
void beginTest(const char * testName)
{
    StaticData::testCount() += 1;
    StaticData::currentTestName() = testName;
    StaticData::thisTestSkipped() = false;
    StaticData::thisTestOk() = true;
}
// Run test finalization: report the verdict of the test that just finished.
// SKIPPED/OK go to stdout, FAILED goes to stderr.
inline
void endTest()
{
    const bool skipped = StaticData::thisTestSkipped();
    const bool ok = StaticData::thisTestOk();
    // Failures are the only verdict routed to stderr.
    std::ostream & out = (!skipped && !ok) ? std::cerr : std::cout;
    const char * verdict = skipped ? " SKIPPED" : (ok ? " OK" : " FAILED");
    out << StaticData::currentTestName() << verdict << std::endl;
}
// Marks the current test as "skipped".
inline
void skipCurrentTest()
{
StaticData::thisTestSkipped() = true;
StaticData::skippedCount() += 1;
}
// Called by the macro SEQAN_ASSERT_FAIL.
// Prints "<file>:<line> FAILED!" plus an optional printf-style comment to
// stderr and bumps the global error count. Does not itself stop execution;
// the calling macro invokes fail() afterwards (see SEQAN_FAIL above).
inline void forceFail(const char * file, int line,
                      const char * comment, ...)
{
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " FAILED! ";
    if (comment)
    {
        std::cerr << " (";
        // Forward the variadic arguments to vfprintf; std::cerr and stderr
        // interleave correctly because cerr is unit-buffered by default.
        va_list args;
        va_start(args, comment);
        vfprintf(stderr, comment, args);
        va_end(args);
        std::cerr << ")";
    }
    std::cerr << std::endl;
}
// Similar to forceFail above, but accepting a va_list parameter.
// The caller owns argp (va_start/va_end happen on the caller's side).
inline void vforceFail(const char * file, int line,
                       const char * comment, va_list argp)
{
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " FAILED! ";
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
}
// Same as forceFail above, but with comment set to 0.
// (Overload used when no user message is supplied.)
inline void forceFail(const char * file, int line)
{
    forceFail(file, line, 0);
}
// Called by the macro SEQAN_ASSERT_EQ.
//
// Compares the two values with operator==. On mismatch, records the failure
// (per-test flag plus global error count) and prints a diagnostic to stderr,
// optionally followed by a printf-style comment. Returns true iff the two
// values compare equal.
template <typename T1, typename T2>
bool testEqual(char const * file, int line,
               T1 const & value1, char const * expression1,
               T2 const & value2, char const * expression2,
               char const * comment, ...)
{
    if (!(value1 == value2))
    {
        // Book-keeping: count the error globally and mark this test failed.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Diagnostic, e.g. "foo.cpp:12 Assertion failed : a == b was: 1 != 2".
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " == " << expression2
                  << " was: " << value1 << " != " << value2;
        if (comment)
        {
            std::cerr << " (";
            va_list ap;
            va_start(ap, comment);
            vfprintf(stderr, comment, ap);
            va_end(ap);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Similar to testEqual above, but accepts a va_list instead of variadic
// parameters. The caller owns argp (va_start/va_end happen there).
template <typename T1, typename T2>
bool vtestEqual(const char * file, int line,
                const T1 & value1, const char * expression1,
                const T2 & value2, const char * expression2,
                const char * comment, va_list argp)
{
    if (!(value1 == value2))
    {
        // Book-keeping: count the error globally and mark this test failed.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " == " << expression2
                  << " was: " << value1 << " != " << value2;
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Same as testEqual above, but with comment set to 0 (no user message).
template <typename T1, typename T2>
bool testEqual(const char * file, int line,
               const T1 & value1, const char * expression1,
               const T2 & value2, const char * expression2)
{
    return testEqual(file, line, value1, expression1, value2, expression2, 0);
}
// Called by the macro SEQAN_ASSERT_IN_DELTA.
//
// Checks that value1 lies in [value2 - value3, value2 + value3]. On failure,
// records it (per-test flag plus global error count) and prints a diagnostic
// to stderr, optionally followed by a printf-style comment. Returns true iff
// value1 is inside the interval.
template <typename T1, typename T2, typename T3>
bool testInDelta(const char * file, int line,
                 const T1 & value1, const char * expression1,
                 const T2 & value2, const char * expression2,
                 const T3 & value3, const char * expression3,
                 const char * comment, ...)
{
    if (!(value1 >= value2 - value3 && value1 <= value2 + value3))
    {
        // Book-keeping: count the error globally and mark this test failed.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " in [" << expression2 << " - " << expression3
                  << ", " << expression2 << " + " << expression3 << "] was: " << value1
                  << " not in [" << value2 - value3 << ", " << value2 + value3 << "]";
        if (comment)
        {
            std::cerr << " (";
            va_list ap;
            va_start(ap, comment);
            vfprintf(stderr, comment, ap);
            va_end(ap);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Similar to testInDelta above, but the comment's format arguments are
// passed as a va_list instead of variadic parameters.
template <typename T1, typename T2, typename T3>
bool vtestInDelta(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2,
                  const T3 & value3, const char * expression3,
                  const char * comment, va_list argp)
{
    if (!(value1 >= value2 - value3 && value1 <= value2 + value3))
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location and the failed expression ...
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression1 << " in [" << expression2 << " - " << expression3
                  << ", " << expression2 << " + " << expression3 << "]";
        // ... followed by the actual values.
        std::cerr << " was: " << value1
                  << " not in [" << value2 - value3 << ", " << value2 + value3 << "]";
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Convenience overload of testInDelta that supplies no comment.
template <typename T1, typename T2, typename T3>
bool testInDelta(const char * file, int line,
                 const T1 & value1, const char * expression1,
                 const T2 & value2, const char * expression2,
                 const T3 & value3, const char * expression3)
{
    const char * noComment = 0;
    return testInDelta(file, line, value1, expression1, value2, expression2,
                       value3, expression3, noComment);
}
// Called by the macro SEQAN_ASSERT_NEQ.
//
// Checks that the two given values are not equal. Returns true iff
// value1 != value2; otherwise marks the current test as failed, bumps
// the global error counter and writes a diagnostic (plus an optional
// printf-style comment) to stderr.
template <typename T1, typename T2>
bool testNotEqual(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2,
                  const char * comment, ...)
{
    if (!(value1 != value2))
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, failed expression and the actual values.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression1 << " != " << expression2;
        std::cerr << " was: " << value1 << " == " << value2;
        if (comment)
        {
            va_list ap;
            va_start(ap, comment);
            std::cerr << " (";
            vfprintf(stderr, comment, ap);
            std::cerr << ")";
            va_end(ap);
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Similar to testNotEqual above, but the comment's format arguments are
// passed as a va_list instead of variadic parameters.
template <typename T1, typename T2>
bool vtestNotEqual(const char * file, int line,
                   const T1 & value1, const char * expression1,
                   const T2 & value2, const char * expression2,
                   const char * comment, va_list argp)
{
    if (!(value1 != value2))
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, failed expression and the actual values.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression1 << " != " << expression2;
        std::cerr << " was: " << value1 << " == " << value2;
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Convenience overload of testNotEqual that supplies no comment.
template <typename T1, typename T2>
bool testNotEqual(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testNotEqual(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_GEQ.
//
// Checks value1 >= value2. Returns true on success; otherwise marks the
// current test as failed, bumps the global error counter and writes a
// diagnostic (plus an optional printf-style comment) to stderr.
template <typename T1, typename T2>
bool testGeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, ...)
{
    if (!(value1 >= value2))
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, failed expression and the actual values.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression1 << " >= " << expression2;
        std::cerr << " was: " << value1 << " < " << value2;
        if (comment)
        {
            va_list ap;
            va_start(ap, comment);
            std::cerr << " (";
            vfprintf(stderr, comment, ap);
            std::cerr << ")";
            va_end(ap);
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Similar to testGeq above, but the comment's format arguments are
// passed as a va_list instead of variadic parameters.
template <typename T1, typename T2>
bool vtestGeq(const char * file, int line,
              const T1 & value1, const char * expression1,
              const T2 & value2, const char * expression2,
              const char * comment, va_list argp)
{
    if (!(value1 >= value2))
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, failed expression and the actual values.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression1 << " >= " << expression2;
        std::cerr << " was: " << value1 << " < " << value2;
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Convenience overload of testGeq that supplies no comment.
template <typename T1, typename T2>
bool testGeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testGeq(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_GT.
//
// Checks value1 > value2. Returns true on success; otherwise marks the
// current test as failed, bumps the global error counter and writes a
// diagnostic (plus an optional printf-style comment) to stderr.
template <typename T1, typename T2>
bool testGt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2,
            const char * comment, ...)
{
    if (!(value1 > value2))
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, failed expression and the actual values.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression1 << " > " << expression2;
        std::cerr << " was: " << value1 << " <= " << value2;
        if (comment)
        {
            va_list ap;
            va_start(ap, comment);
            std::cerr << " (";
            vfprintf(stderr, comment, ap);
            std::cerr << ")";
            va_end(ap);
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Similar to testGt above, but the comment's format arguments are
// passed as a va_list instead of variadic parameters.
template <typename T1, typename T2>
bool vtestGt(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, va_list argp)
{
    if (!(value1 > value2))
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, failed expression and the actual values.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression1 << " > " << expression2;
        std::cerr << " was: " << value1 << " <= " << value2;
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Convenience overload of testGt that supplies no comment.
template <typename T1, typename T2>
bool testGt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testGt(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_LEQ.
//
// Checks value1 <= value2. Returns true on success; otherwise marks the
// current test as failed, bumps the global error counter and writes a
// diagnostic (plus an optional printf-style comment) to stderr.
template <typename T1, typename T2>
bool testLeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, ...)
{
    if (!(value1 <= value2))
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, failed expression and the actual values.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression1 << " <= " << expression2;
        std::cerr << " was: " << value1 << " > " << value2;
        if (comment)
        {
            va_list ap;
            va_start(ap, comment);
            std::cerr << " (";
            vfprintf(stderr, comment, ap);
            std::cerr << ")";
            va_end(ap);
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Similar to testLeq above, but the comment's format arguments are
// passed as a va_list instead of variadic parameters.
template <typename T1, typename T2>
bool vtestLeq(const char * file, int line,
              const T1 & value1, const char * expression1,
              const T2 & value2, const char * expression2,
              const char * comment, va_list argp)
{
    if (!(value1 <= value2))
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, failed expression and the actual values.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression1 << " <= " << expression2;
        std::cerr << " was: " << value1 << " > " << value2;
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Convenience overload of testLeq that supplies no comment.
template <typename T1, typename T2>
bool testLeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testLeq(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_LT.
//
// Checks value1 < value2. Returns true on success; otherwise marks the
// current test as failed, bumps the global error counter and writes a
// diagnostic (plus an optional printf-style comment) to stderr.
template <typename T1, typename T2>
bool testLt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2,
            const char * comment, ...)
{
    if (!(value1 < value2))
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, failed expression and the actual values.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression1 << " < " << expression2;
        std::cerr << " was: " << value1 << " >= " << value2;
        if (comment)
        {
            va_list ap;
            va_start(ap, comment);
            std::cerr << " (";
            vfprintf(stderr, comment, ap);
            std::cerr << ")";
            va_end(ap);
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Similar to testLt above, but the comment's format arguments are
// passed as a va_list instead of variadic parameters.
template <typename T1, typename T2>
bool vtestLt(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, va_list argp)
{
    if (!(value1 < value2))
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, failed expression and the actual values.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression1 << " < " << expression2;
        std::cerr << " was: " << value1 << " >= " << value2;
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Convenience overload of testLt that supplies no comment.
template <typename T1, typename T2>
bool testLt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testLt(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT.
//
// Checks that value_ converts to true. Returns true if it does;
// otherwise marks the current test as failed, bumps the global error
// counter and writes a diagnostic (plus an optional printf-style
// comment) to stderr.
template <typename T>
bool testTrue(const char * file, int line,
              const T & value_, const char * expression_,
              const char * comment, ...)
{
    if (!(value_))
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, expression and the offending value.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression_ << " should be true but was " << (value_);
        if (comment)
        {
            va_list ap;
            va_start(ap, comment);
            std::cerr << " (";
            vfprintf(stderr, comment, ap);
            std::cerr << ")";
            va_end(ap);
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Similar to testTrue above, but the comment's format arguments are
// passed as a va_list instead of variadic parameters.
template <typename T>
bool vtestTrue(const char * file, int line,
               const T & value_, const char * expression_,
               const char * comment, va_list argp)
{
    if (!(value_))
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, expression and the offending value.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression_ << " should be true but was " << (value_);
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Convenience overload of testTrue that supplies no comment.
template <typename T>
bool testTrue(const char * file, int line,
              const T & value_, const char * expression_)
{
    const char * noComment = 0;
    return testTrue(file, line, value_, expression_, noComment);
}
// Called by the macro SEQAN_ASSERT_NOT.
//
// Checks that value_ converts to false. Returns true if it does;
// otherwise marks the current test as failed, bumps the global error
// counter and writes a diagnostic (plus an optional printf-style
// comment) to stderr.
template <typename T>
bool testFalse(const char * file, int line,
               const T & value_, const char * expression_,
               const char * comment, ...)
{
    if (value_)
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, expression and the offending value.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression_ << " should be false but was " << (value_);
        if (comment)
        {
            va_list ap;
            va_start(ap, comment);
            std::cerr << " (";
            vfprintf(stderr, comment, ap);
            std::cerr << ")";
            va_end(ap);
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Similar to testFalse above, but the comment's format arguments are
// passed as a va_list instead of variadic parameters.
template <typename T>
bool vtestFalse(const char * file, int line,
                const T & value_, const char * expression_,
                const char * comment, va_list argp)
{
    if (value_)
    {
        // Global book-keeping for the test driver.
        StaticData::errorCount() += 1;
        StaticData::thisTestOk() = false;
        // Location, expression and the offending value.
        std::cerr << file << ":" << line << " Assertion failed : ";
        std::cerr << expression_ << " should be false but was " << (value_);
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Convenience overload of testFalse that supplies no comment.
template <typename T>
bool testFalse(const char * file, int line,
               const T & value_, const char * expression_)
{
    const char * noComment = 0;
    return testFalse(file, line, value_, expression_, noComment);
}
// Represents a check point (file/line location) in a source file.
struct CheckPoint
{
    // Path to the file.
    const char * file;
    // Line in the file.
    unsigned int line;

    // Strict weak ordering: order by file name first (strcmp), then by
    // line number for equal file names.
    bool operator<(const CheckPoint & other) const
    {
        const int cmp = strcmp(file, other.file);
        return cmp < 0 || (cmp == 0 && line < other.line);
    }
};
// Wrapper around the global set of registered check points.
// TODO(holtgrew): Simply store the set?
struct CheckPointStore
{
    // Returns the singleton set holding all registered check points.
    static ::std::set<CheckPoint> & data()
    {
        static ::std::set<CheckPoint> store;
        return store;
    }
};
// Puts the given check point into the CheckPointStore's data.
//
// Only the base name of the file (the part after the last '/' or '\')
// is stored, so later look-ups are independent of build paths.
// Always returns true (so the call can be used inside expressions).
inline bool
registerCheckPoint(unsigned int line, const char * file)
{
    // Strip the directory part, handling both UNIX and Windows separators.
    const char * file_name = strrchr(file, '/');
    const char * file_name_2 = strrchr(file, '\\');
    if (file_name_2 > file_name)
        file_name = file_name_2;
    if (!file_name)
        file_name = file;
    else
        ++file_name;
    CheckPoint cp = {file_name, line};
    // BUG FIX: the standard OpenMP feature-test macro is _OPENMP, not _OMP,
    // so the critical section was never emitted in OpenMP builds. Accept
    // both spellings for backward compatibility.
#if defined(_OPENMP) || defined(_OMP)
#pragma omp critical
#endif  // defined(_OPENMP) || defined(_OMP)
    CheckPointStore::data().insert(cp);
    return true;
}
// Test whether the given check point exists in the check point
// store.
inline void
testCheckPoint(const char * file, unsigned int line)
{
StaticData::totalCheckPointCount() += 1;
CheckPoint cp = {file, line};
if (CheckPointStore::data().find(cp) == CheckPointStore::data().end())
{
std::cerr << file << ":" << line << " -- Check point lost."
<< std::endl;
return;
}
StaticData::foundCheckPointCount() += 1;
}
// Verify the check points for the given file.
//
// Opens "<pathToRoot>/<file>", scans it line by line and tests each line
// containing "SEQAN_CHECKPOINT" against the registered check points.
// If the file cannot be opened, a message is printed and the function
// returns without scanning.
inline void
verifyCheckPoints(const char * file)
{
    // Strip the directory part of the name (UNIX and Windows separators).
    char const * file_name = strrchr(file, '/');
    char const * file_name_2 = strrchr(file, '\\');
    if (file_name_2 > file_name)
        file_name = file_name_2;
    if (!file_name)
        file_name = file;
    else
        ++file_name;
    // Build the absolute path "<pathToRoot>/<file>".
    int len = strlen(StaticData::pathToRoot()) +
              strlen("/") + strlen(file) + 1;
    char * absolutePath = new char[len];
    absolutePath[0] = '\0';
    strcat(absolutePath, StaticData::pathToRoot());
    strcat(absolutePath, "/");
    strcat(absolutePath, file);
    FILE * fl = ::std::fopen(absolutePath, "r");
    delete[] absolutePath;
    if (!fl)
    {
        std::cerr << file << " -- verifyCheckPoints could not find this file." << std::endl;
        // BUG FIX: previously execution fell through and called
        // fgets() on a NULL stream, which is undefined behavior.
        return;
    }
    unsigned int line_number = 1;
    char buf[1 << 16];
    while (::std::fgets(buf, sizeof(buf), fl))
    {
        if (::std::strstr(buf, "SEQAN_CHECKPOINT"))
        {
            testCheckPoint(file_name, line_number);
        }
        ++line_number;
    }
    ::std::fclose(fl);
}
#if SEQAN_ENABLE_TESTING
// Abort the current test: mark it as failed, print a stack trace (up to
// 20 frames) and raise an AssertionFailedException, which the test
// driver (SEQAN_CALL_TEST) catches so the remaining tests still run.
inline void fail()
{
StaticData::thisTestOk() = false;
printStackTrace(20);
throw AssertionFailedException();
}
#else
// Outside of testing mode there is no driver to catch an exception, so
// print a stack trace and abort the whole program.
inline void fail()
{
printStackTrace(20);
abort();
}
#endif // #if SEQAN_ENABLE_TESTING
} // namespace ClassTest
/**
.Macro.SEQAN_DEFINE_TEST
..summary:Expand to test definition.
..cat:Testing & Debugging
..signature:SEQAN_DEFINE_TEST(test_name)
..param.test_name:The name of the test.
..remarks:This macro expands to the definition of a $void$ function with $SEQAN_TEST_ + test_name$ as its name.
..example.code:
SEQAN_DEFINE_TEST(test_name)
{
SEQAN_ASSERT_LT(0, 3);
}
..see:Macro.SEQAN_SKIP_TEST
..see:Macro.SEQAN_CALL_TEST
..see:Macro.SEQAN_BEGIN_TESTSUITE
..see:Macro.SEQAN_END_TESTSUITE
*/
// This macro expands to the function header for one test.
// The dummy bool template parameter keeps unused test bodies from being
// compiled: a test is only instantiated when called via SEQAN_CALL_TEST,
// which invokes SEQAN_TEST_<name><true>().
#define SEQAN_DEFINE_TEST(test_name) \
template <bool speed_up_dummy_to_prevent_compilation_of_unused_tests_> \
void SEQAN_TEST_ ## test_name()
/**
.Macro.SEQAN_BEGIN_TESTSUITE
..summary:Expand to a test suite beginning.
..cat:Testing & Debugging
..signature:SEQAN_BEGIN_TESTSUITE(name)
..param.name:The name of the test suite.
..remarks:This macro expands to a $main()$ function and some initialization code that sets up the test system.
..example.code:
#include <seqan/basic.h>
SEQAN_BEGIN_TESTSUITE(test_foo)
{
SEQAN_CALL_TEST(test_foo_my_test);
}
SEQAN_END_TESTSUITE
..see:Macro.SEQAN_SKIP_TEST
..see:Macro.SEQAN_DEFINE_TEST
..see:Macro.SEQAN_CALL_TEST
..see:Macro.SEQAN_END_TESTSUITE
*/
#if SEQAN_ENABLE_TESTING
// This macro expands to startup code for a test file: it opens the
// definition of main() and initializes the test system via
// beginTestSuite() (argv[0] is passed along as the program path).
#define SEQAN_BEGIN_TESTSUITE(suite_name) \
int main(int argc, char ** argv) { \
(void) argc; \
::seqan::ClassTest::beginTestSuite(# suite_name, argv[0]);
/**
.Macro.SEQAN_END_TESTSUITE
..summary:Expand to a test suite ending.
..cat:Testing & Debugging
..signature:SEQAN_END_TESTSUITE
..remarks:This macro expands to finalization code for a test suite.
..example.code:
#include <seqan/basic.h>
SEQAN_BEGIN_TESTSUITE(test_foo)
{
SEQAN_CALL_TEST(test_foo_my_test);
}
SEQAN_END_TESTSUITE
..see:Macro.SEQAN_SKIP_TEST
..see:Macro.SEQAN_DEFINE_TEST
..see:Macro.SEQAN_CALL_TEST
..see:Macro.SEQAN_BEGIN_TESTSUITE
*/
// This macro expands to shutdown code for a test file: it returns
// endTestSuite()'s result from main() and closes the brace opened by
// SEQAN_BEGIN_TESTSUITE.
#define SEQAN_END_TESTSUITE \
return ::seqan::ClassTest::endTestSuite(); \
}
/**
.Macro.SEQAN_CALL_TEST
..summary:Expand to calling a test.
..cat:Testing & Debugging
..signature:SEQAN_CALL_TEST(test_name)
..param.test_name:The name of the test.
..remarks:This expects the test to be defined with @Macro.SEQAN_DEFINE_TEST@. This macro will expand to code that calls the code inside a try/catch block. Use this macro within a test suite, only.
..example.code:
// Within a test suite.
SEQAN_CALL_TEST(test_name);
..see:Macro.SEQAN_SKIP_TEST
..see:Macro.SEQAN_DEFINE_TEST
..see:Macro.SEQAN_BEGIN_TESTSUITE
..see:Macro.SEQAN_END_TESTSUITE
*/
// This macro expands to code to call a given test.
//
// The test runs inside a try/catch block so that an assertion failure
// (AssertionFailedException raised by fail()) aborts only this test and
// the suite continues with the next one. The exception is caught by
// const reference (catching by value would make a needless copy).
#define SEQAN_CALL_TEST(test_name) \
    do { \
        ::seqan::ClassTest::beginTest(# test_name); \
        try { \
            SEQAN_TEST_ ## test_name<true>(); \
        } catch (::seqan::ClassTest::AssertionFailedException const & e) { \
            /* Swallow exception, go on with next test. */ \
            (void) e; /* Get rid of unused variable warning. */ \
        } \
        ::seqan::ClassTest::endTest(); \
    } while (false)
/**
.Macro.SEQAN_SKIP_TEST
..cat:Testing & Debugging
..summary:Force the test to return without failing and mark it as skipped.
..signature:SEQAN_SKIP_TEST
..example.code:
SEQAN_DEFINE_TEST(test_skipped)
{
SEQAN_SKIP_TEST;
}
..see:Macro.SEQAN_DEFINE_TEST
..see:Macro.SEQAN_CALL_TEST
..see:Macro.SEQAN_BEGIN_TESTSUITE
..see:Macro.SEQAN_END_TESTSUITE
*/
// This macro returns from the current function and logs a "skipped"
// event for the current test. Because it expands to a plain 'return;',
// it can only be used directly inside a function returning void (i.e. a
// test body).
#define SEQAN_SKIP_TEST \
do { \
::seqan::ClassTest::skipCurrentTest(); \
return; \
} while (false)
#endif // #if SEQAN_ENABLE_TESTING
// variadic macros are not supported by VS 2003 and before
#if !defined(_MSC_VER) || (_MSC_VER >= 1400)
#if SEQAN_ENABLE_DEBUG
/**
.Macro.SEQAN_ASSERT
..cat:Assertions
..summary:Test that the given expression can be coerced to $true$.
..signature:SEQAN_ASSERT(expression)
..signature:SEQAN_ASSERT_MSG(expression, message[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT(0); // will fail
SEQAN_ASSERT(1); // will run through
SEQAN_ASSERT_MSG(0, "message %d", 2); // Will fail with message.
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_NOT
..cat:Assertions
..summary:Test that the given expression can be coerced to $false$.
..signature:SEQAN_ASSERT_NOT(expression)
..signature:SEQAN_ASSERT_NOT_MSG(expression, message[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_NOT(0); // will run through
SEQAN_ASSERT_NOT(1); // will fail
SEQAN_ASSERT_NOT_MSG(0, "msg %s", "test"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_EQ
..cat:Assertions
..summary:Test that two given expressions are equal, as defined by the matching call to the $operator==(,)$.
..signature:SEQAN_ASSERT_EQ(expression1, expression2)
..signature:SEQAN_ASSERT_EQ_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_EQ(0, false); // will run through
SEQAN_ASSERT_EQ(1, false); // will fail
SEQAN_ASSERT_EQ(1, "foo"); // will not compile
SEQAN_ASSERT_EQ_MSG(1, false, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_NEQ
..cat:Assertions
..summary:Test that two given expressions are not equal, as defined by the matching call to the $operator!=(,)$.
..signature:SEQAN_ASSERT_NEQ(expression1, expression2)
..signature:SEQAN_ASSERT_NEQ_MSG(expression1, expression2, message[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_NEQ(0, false); // will fail
SEQAN_ASSERT_NEQ(1, false); // will run through
SEQAN_ASSERT_NEQ(1, "foo"); // will not compile
SEQAN_ASSERT_NEQ_MSG(1, false, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_LT
..cat:Assertions
..summary:Test that the two given expressions are in the less-than relation as defined by the matching call to operator<(,).
..signature:SEQAN_ASSERT_LT(expression1, expression2)
..signature:SEQAN_ASSERT_LT_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_LT(0, 1); // will run through
SEQAN_ASSERT_LT(1, 1); // will not run through
SEQAN_ASSERT_LT_MSG(1, 1, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_LEQ
..cat:Assertions
..summary:Test that the two given expressions are in the less-than-or-equal relation as defined by the matching call to operator<=(,).
..signature:SEQAN_ASSERT_LEQ(expression1, expression2)
..signature:SEQAN_ASSERT_LEQ_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_LEQ(1, 1); // will run through
SEQAN_ASSERT_LEQ(1, 2); // will not run through
SEQAN_ASSERT_LEQ_MSG(1, 2, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_GT
..cat:Assertions
..summary:Test that the two given expressions are in the greater-than relation as defined by the matching call to operator>(,).
..signature:SEQAN_ASSERT_GT(expression1, expression2)
..signature:SEQAN_ASSERT_GT_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_GT(2, 1); // will run through
SEQAN_ASSERT_GT(1, 1); // will not run through
SEQAN_ASSERT_GT_MSG(1, 1, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_GEQ
..cat:Assertions
..summary:Test that the two given expressions are in the greater-than-or-equal relation as defined by the matching call to operator>=(,).
..signature:SEQAN_ASSERT_GEQ(expression1, expression2)
..signature:SEQAN_ASSERT_GEQ_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_GEQ(1, 1); // will run through
SEQAN_ASSERT_GEQ(0, 1); // will not run through
SEQAN_ASSERT_GEQ_MSG(0, 1, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_IN_DELTA
..cat:Assertions
..summary:Test that the values of the two given expressions are equal up to an absolute difference of $delta$.
..signature:SEQAN_ASSERT_IN_DELTA(x, y, delta)
..signature:SEQAN_ASSERT_IN_DELTA_MSG(x, y, delta, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_IN_DELTA(0, 0, 0.1); // will run through
SEQAN_ASSERT_IN_DELTA(1, -2, 1); // will fail
SEQAN_ASSERT_IN_DELTA(1, "foo"); // will not compile
SEQAN_ASSERT_IN_DELTA_MSG(1, 0, 0.1, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
*/
// Force a test failure.
//
// Usage: SEQAN_ASSERT_FAIL("Failed at position %d", pos);
#define SEQAN_ASSERT_FAIL(...) \
do { \
::seqan::ClassTest::forceFail(__FILE__, __LINE__, \
__VA_ARGS__); \
::seqan::ClassTest::fail(); \
} while (false)
// Equality assertion without a comment.
//
// Usage: SEQAN_ASSERT_EQ(4, 4);
#define SEQAN_ASSERT_EQ(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Equality assertion with a comment.
//
// Usage: SEQAN_ASSERT_EQ(4, 4);
#define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// In-delta-environment assertion without a comment.
//
// Usage: SEQAN_ASSERT_IN_DELTA(4.1, 4, 0.1);
#define SEQAN_ASSERT_IN_DELTA(_arg1, _arg2, _arg3) \
do { \
if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
(_arg3), # _arg3)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// In-delta-environment assertion with a comment.
//
// Usage: SEQAN_ASSERT_IN_DELTA_MSG(4.1, 4, 0.1, "3.9 <= 4.1 <= 4.1");
#define SEQAN_ASSERT_IN_DELTA_MSG(_arg1, _arg2, _arg3, ...) \
do { \
if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
(_arg3), # _arg3, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Inequality assertion without a comment.
//
// Usage: SEQAN_ASSERT_NEQ(4, 5);
#define SEQAN_ASSERT_NEQ(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Inequality assertion with a comment.
//
// Usage: SEQAN_ASSERT_NEQ_MSG(4, 5, "message");
#define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Less-than-or-equal assertion without a comment.
#define SEQAN_ASSERT_LEQ(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Less-than-or-equal assertion with a comment.
#define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Less-than assertion without a comment.
#define SEQAN_ASSERT_LT(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Less-than assertion with a comment.
#define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Greater-than-or-equal assertion without a comment.
#define SEQAN_ASSERT_GEQ(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Greater-than-or-equal assertion with a comment.
#define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Greater-than assertion without a comment.
#define SEQAN_ASSERT_GT(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Greater-than assertion with a comment.
#define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.;
// Trueness assertion without a comment.
//
// Usage: SEQAN_ASSERT(false);
#define SEQAN_ASSERT(_arg1) \
do { \
if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \
(_arg1), # _arg1)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.;
// Trueness assertion with a comment.
#define SEQAN_ASSERT_MSG(_arg1, ...) \
do { \
if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \
(_arg1), # _arg1, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Falseness assertion without a comment.
//
// Usage: SEQAN_ASSERT_NOT(false);
#define SEQAN_ASSERT_NOT(_arg1) \
do { \
if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \
(_arg1), # _arg1)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Falseness assertion with a comment.
#define SEQAN_ASSERT_NOT_MSG(_arg1, ...) \
do { \
if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \
(_arg1), # _arg1, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
#else // #if SEQAN_ENABLE_DEBUG
#define SEQAN_ASSERT_EQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_NEQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_LEQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_LT(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_GEQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_GT(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT(_arg1) do {} while (false)
#define SEQAN_ASSERT_MSG(_arg1, ...) do {} while (false)
#define SEQAN_ASSERT_NOT(_arg1) do {} while (false)
#define SEQAN_ASSERT_NOT_MSG(_arg1, ...) do {} while (false)
#define SEQAN_ASSERT_FAIL(...) do {} while (false)
#endif // #if SEQAN_ENABLE_DEBUG
#else // no variadic macros
#if SEQAN_ENABLE_DEBUG
inline void SEQAN_ASSERT_FAIL(const char * comment, ...)
{
va_list args;
va_start(args, comment);
::seqan::ClassTest::vforceFail("", 0, comment, args);
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3)
{
if (!::seqan::ClassTest::testInDelta("", 0, _arg1, "", _arg2, "", _arg3, ""))
::seqan::ClassTest::fail();
}
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestInDelta("", 0, _arg1, "", _arg2, "", _arg3, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testEqual("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestEqual("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testNotEqual("", _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestNotEqual("", _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testLeq("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestLeq("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1, typename T2>
void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testLt("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
template <typename T1, typename T2>
void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestLt("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testGeq("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestGeq("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1, typename T2>
void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testGt("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
template <typename T1, typename T2>
void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestGt("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1>
void SEQAN_ASSERT(T1 const & _arg1)
{
if (!::seqan::ClassTest::testTrue("", 0, _arg1, ""))
::seqan::ClassTest::fail();
}
template <typename T1>
void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestTrue("", 0, _arg1, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1>
void SEQAN_ASSERT_NOT(T1 const & _arg1)
{
if (!::seqan::ClassTest::testFalse("", 0, _arg1, ""))
::seqan::ClassTest::fail();
}
template <typename T1>
void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestFalse("", 0, _arg1, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
#else // #if SEQAN_ENABLE_DEBUG
inline void SEQAN_ASSERT_FAIL(const char * comment, ...) {}
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3) {}
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1>
void SEQAN_ASSERT(T1 const & _arg1) {}
template <typename T1>
void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...) {}
template <typename T1>
void SEQAN_ASSERT_NOT(T1 const & _arg1) {}
template <typename T1>
void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...) {}
#endif // #if SEQAN_ENABLE_DEBUG
#endif // no variadic macros
// Returns a string (of type char*) with the path to the called binary.
//
// Use this to locate files relative to the test binary.
#define SEQAN_PROGRAM_PATH \
::seqan::ClassTest::StaticData::basePath()
// TODO(holtgrew): Subject to change wiht restructuring.
/**
.Macro.SEQAN_PATH_TO_ROOT
..cat:Testing & Debugging
..summary:Return path to the checkout root directory (i.e. containing core/extras).
..returns:$char const *$, string with the path to the parent directory of the tests directory.
..signature:SEQAN_PATH_TO_ROOT()
..remarks:The pointed to string is initialized on program startup by the code generated by @Macro.SEQAN_BEGIN_TESTSUITE@.
..example.code:
const char *p = SEQAN_PATH_TO_ROOT();
char buffer[1000];
strcpy(buffer, p);
strcat(buffer, "/tests/files/example.txt");
FILE *f = fopen(buffer, "w");
fprintf(f, "Test Data");
fclose(f);
..see:Macro.SEQAN_TEMP_FILENAME
*/
// Returns a const char * string with the path to the projects directory.
#define SEQAN_PATH_TO_ROOT() \
::seqan::ClassTest::StaticData::pathToRoot()
// Returns the POSIX int file handle to an open file.
// TODO(holtgrewe): Uncomment if openTempFile has been implemented.
// #define SEQAN_OPEN_TEMP_FILE() (::seqan::ClassTest::openTempFile())
/**
.Macro.SEQAN_TEMP_FILENAME
..cat:Testing & Debugging
..summary:Generates the name to a temporary file.
..returns:$char const *$, string with the path to a temporary file.
..signature:SEQAN_TEMP_FILENAME()
..remarks:The pointed to string is stored in a buffer and is overwritten by the next call to this macro. Copy it out if you need it.
..example.code:
const char *p = SEQAN_TEMP_FILENAME();
char tempFilename[1000];
strcpy(tempFilename, p);
FILE *f = fopen(tempFilename, "w");
fprintf(f, "Test Data");
fclose(f);
..see:Macro.SEQAN_PATH_TO_ROOT
*/
// Returns a temporary filename.
#define SEQAN_TEMP_FILENAME() (::seqan::ClassTest::tempFileName())
/**
.Macro.SEQAN_VERIFY_CHECKPOINTS
..cat:Testing & Debugging
..summary:Verify check points for the given file name.
..signature:SEQAN_VERIFY_CHECKPOINTS(path)
..param.path:Path to the file to verify check points for. Relative to parent directory of tests.
..example.code:
SEQAN_VERIFY_CHECKPOINTS("core/include/seqan/basic_alphabet.h");
..see:Macro.SEQAN_CHECKPOINT
.Macro.SEQAN_CHECKPOINT
..cat:Testing & Debugging
..summary:Generate a check point.
..signature:SEQAN_CHECKPOINT
..remarks:Whenever the code executes the instructions generated by this macro, the check point for this line will be set in global testing state. Use @Macro.SEQAN_VERIFY_CHECKPOINTS@ to verify whether all checkpoints have been reached in a file up to this point.
SEQAN_CHECKPOINT;
..see:Macro.SEQAN_VERIFY_CHECKPOINTS
*/
#if SEQAN_ENABLE_CHECKPOINTS
// Create a check point at the point where the macro is placed.
// TODO(holtgrew): Should be called SEQAN_CHECK_POINT to be consistent.
#define SEQAN_CHECKPOINT \
::seqan::ClassTest::registerCheckPoint(__LINE__, __FILE__);
// Call the check point verification code for the given file.
#define SEQAN_VERIFY_CHECKPOINTS(filename) \
::seqan::ClassTest::verifyCheckPoints(filename)
#else // #if SEQAN_ENABLE_CHECKPOINTS
#define SEQAN_CHECKPOINT
// If checkpoints are to be verified if testing is disabled then print
// a warning.
#define SEQAN_VERIFY_CHECKPOINTS(filename) \
do { \
fprintf(stderr, ("WARNING: Check point verification is " \
"disabled. Trying to verify %s from %s:%d.\n"), \
filename, __FILE__, __LINE__); \
} while (false)
#endif // #if SEQAN_ENABLE_CHECKPOINTS
#if !SEQAN_ENABLE_TESTING
#define SEQAN_BEGIN_TESTSUITE(suite_name) \
int main(int argc, char ** argv) { \
(void) argc; \
(void) argv; \
fprintf(stderr, "Warning: SEQAN_ENABLE_TESTING is wrong and you used the macro SEQAN_BEGIN_TESTSUITE!\n");
#define SEQAN_END_TESTSUITE \
return 0; \
}
#define SEQAN_CALL_TEST(test_name) do { SEQAN_TEST_ ## test_name(); } while (false)
#define SEQAN_SKIP_TEST do {} while (false)
#endif // #if !SEQAN_ENABLE_TESTING
} // namespace seqan
#endif // SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
|
GB_binop__le_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int16)
// A*D function (colscale): GB (_AxD__le_int16)
// D*A function (rowscale): GB (_DxB__le_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__le_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__le_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int16)
// C=scalar+B GB (_bind1st__le_int16)
// C=scalar+B' GB (_bind1st_tran__le_int16)
// C=A+scalar GB (_bind2nd__le_int16)
// C=A'+scalar GB (_bind2nd_tran__le_int16)
// C type: bool
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT16 || GxB_NO_LE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__le_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__le_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__le_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__le_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__le_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__le_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__le_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__le_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__le_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__le_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__le_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
syrk.c | /**
* syrk.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include "BenchmarksUtil.h"
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#define BENCHMARK_NAME "SYRK"
// define the error threshold for the results "not matching"
#define ERROR_THRESHOLD 0.05
#ifdef RUN_POLYBENCH_SIZE
#define SIZE 1024
#elif RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
/* Problem size */
#define N SIZE
#define M SIZE
/* Declared constant values for alpha and beta */
/* (same as values in PolyBench 2.0) */
#define alpha 12435
#define beta 4546
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE *A, DATA_TYPE *C, DATA_TYPE *D) {
int i, j;
for (i = 0; i < N; i++) {
for (j = 0; j < M; j++) {
A[i * M + j] = ((DATA_TYPE)i * j) / N;
}
for (j = 0; j < M; j++) {
C[i * M + j] = ((DATA_TYPE)i * j + 2) / N;
D[i * M + j] = ((DATA_TYPE)i * j + 2) / N;
}
}
}
int compareResults(DATA_TYPE *C, DATA_TYPE *D) {
int i, j, fail;
fail = 0;
// Compare C with D
for (i = 0; i < N; i++) {
for (j = 0; j < M; j++) {
if (percentDiff(C[i * M + j], D[i * M + j]) > ERROR_THRESHOLD) {
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
"Percent: %d\n",
ERROR_THRESHOLD, fail);
return fail;
}
void syrk(DATA_TYPE *A, DATA_TYPE *C) {
int i, j, k;
for (i = 0; i < N; i++) {
for (j = 0; j < M; j++) {
C[i * M + j] *= beta;
}
}
for (i = 0; i < N; i++) {
for (j = 0; j < M; j++) {
for (k = 0; k < M; k++) {
C[i * N + j] += alpha * A[i * M + k] * A[j * M + k];
}
}
}
}
/*
 * Offloaded SYRK: D1 = beta*Dinit, then D2 = D1 + alpha * A * A^T,
 * both computed inside a single "omp target teams" region and timed.
 *
 * Fix: the second kernel indexed D1/D2 with row stride N (i*N+j) while the
 * first used stride M (i*M+j); harmless only because N == M here.  Stride M
 * is now used consistently, matching the map() clauses of size N*M.
 */
void syrkGPU(DATA_TYPE *A, DATA_TYPE *Dinit, DATA_TYPE *D1, DATA_TYPE *D2) {
  double t_start, t_end;
  t_start = rtclock();
#pragma omp target teams map(to : A[ : N *M], Dinit[ : N *M]) map(tofrom : D1[ : N *M], D2[ : N *M]) device(DEVICE_ID)
  {
    /* D1 = beta * Dinit */
#pragma omp distribute parallel for collapse(2)
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < M; j++) {
        D1[i * M + j] = Dinit[i * M + j] * beta;
      }
    }
    /* D2 = D1 + alpha * A * A^T */
#pragma omp distribute parallel for collapse(2)
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < M; j++) {
        D2[i * M + j] = D1[i * M + j];
        for (int k = 0; k < M; k++) {
          D2[i * M + j] += alpha * A[i * M + k] * A[j * M + k];
        }
      }
    }
  }
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
}
int main() {
double t_start, t_end;
int fail = 0;
DATA_TYPE *A;
DATA_TYPE *C;
DATA_TYPE *Dinit;
DATA_TYPE *D1;
DATA_TYPE *D2;
A = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
C = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
Dinit = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
D1 = (DATA_TYPE *)calloc(N * M, sizeof(DATA_TYPE));
D2 = (DATA_TYPE *)calloc(N * M, sizeof(DATA_TYPE));
//fprintf(stdout, "<< Symmetric rank-k operations size: %d>>\n", SIZE);
printBenchmarkInfo(BENCHMARK_NAME, SIZE);
init_arrays(A, C, Dinit);
syrkGPU(A, Dinit, D1, D2);
#ifdef RUN_TEST
t_start = rtclock();
syrk(A, C);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
fail = compareResults(C, D2);
#endif
free(A);
free(C);
free(Dinit);
return fail;
}
|
vec_avx.h | /* Copyright (c) 2018 Mozilla
2012-2017 Jean-Marc Valin */
/*
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
AVX implementation of vector operations, compile with -mavx
AVX2/FMA implementation of vector operations, compile with -mavx2 -mfma
*/
#include <immintrin.h>
#include <omp.h>
#define OMP
#ifdef __AVX2__
/* Fast vectorized exp() approximation for 8 floats (AVX2/FMA path).
   Uses exp(x) = 2^(x*log2(e)): the integer part of the scaled input is
   injected directly into the IEEE-754 exponent bits (shift by 23), and the
   fractional remainder is approximated by the cubic polynomial K0..K3.
   The scaled input is clamped to [-50, 50] so the exponent arithmetic
   cannot overflow. */
static __m256 exp8_approx(__m256 X)
{
const __m256 K0 = _mm256_set1_ps(0.99992522f);
const __m256 K1 = _mm256_set1_ps(0.69583354f);
const __m256 K2 = _mm256_set1_ps(0.22606716f);
const __m256 K3 = _mm256_set1_ps(0.078024523f);
const __m256 log2_E = _mm256_set1_ps(1.44269504);
const __m256 max_in = _mm256_set1_ps(50.f);
const __m256 min_in = _mm256_set1_ps(-50.f);
const __m256i mask = _mm256_set1_epi32(0x7fffffff);
__m256 XF, Y;
__m256i I;
/* Rescale: exp(x) == 2^(x*log2(e)); clamp to the supported range. */
X = _mm256_mul_ps(X, log2_E);
X = _mm256_max_ps(min_in, _mm256_min_ps(max_in, X));
/* Split into integer part I and fractional part X in [0,1). */
XF = _mm256_floor_ps(X);
I = _mm256_cvtps_epi32(XF);
X = _mm256_sub_ps(X, XF);
/* Cubic polynomial approximation of 2^X on the fractional part. */
Y = _mm256_fmadd_ps(_mm256_fmadd_ps(_mm256_fmadd_ps(K3, X, K2), X, K1), X, K0);
/* Add I into the exponent field of Y (multiplies Y by 2^I); the mask
   clears the sign bit so the result is always non-negative. */
I = _mm256_slli_epi32(I, 23);
Y = _mm256_castsi256_ps(_mm256_and_si256(mask, _mm256_add_epi32(I, _mm256_castps_si256(Y))));
return Y;
}
#else
#define _mm256_fmadd_ps(a,b,c) _mm256_add_ps(_mm256_mul_ps(a, b), c)
#define _mm_fmadd_ps(a,b,c) _mm_add_ps(_mm_mul_ps(a, b), c)
/* SSE variant of the fast exp() approximation: same algorithm as the AVX2
   exp8_approx() (exponent-bit injection plus cubic polynomial), applied to
   4 floats.  Used to build the 8-wide fallback when AVX2 is unavailable. */
static __m128 exp4_approx(__m128 X)
{
const __m128 K0 = _mm_set1_ps(0.99992522f);
const __m128 K1 = _mm_set1_ps(0.69583354f);
const __m128 K2 = _mm_set1_ps(0.22606716f);
const __m128 K3 = _mm_set1_ps(0.078024523f);
const __m128 log2_E = _mm_set1_ps(1.44269504);
const __m128 max_in = _mm_set1_ps(50.f);
const __m128 min_in = _mm_set1_ps(-50.f);
const __m128i mask = _mm_set1_epi32(0x7fffffff);
__m128 XF, Y;
__m128i I;
/* Rescale and clamp, then split into integer and fractional parts. */
X = _mm_mul_ps(X, log2_E);
X = _mm_max_ps(min_in, _mm_min_ps(max_in, X));
XF = _mm_floor_ps(X);
I = _mm_cvtps_epi32(XF);
X = _mm_sub_ps(X, XF);
/* Cubic polynomial for 2^frac, then inject I into the exponent bits. */
Y = _mm_fmadd_ps(_mm_fmadd_ps(_mm_fmadd_ps(K3, X, K2), X, K1), X, K0);
I = _mm_slli_epi32(I, 23);
Y = _mm_castsi128_ps(_mm_and_si128(mask, _mm_add_epi32(I, _mm_castps_si128(Y))));
return Y;
}
static __m256 exp8_approx(__m256 X)
{
__m256 Y;
__m128 Xhi, Xlo, Yhi, Ylo;
Xhi = _mm256_extractf128_ps(X, 1);
Xlo = _mm256_extractf128_ps(X, 0);
Yhi = exp4_approx(Xhi);
Ylo = exp4_approx(Xlo);
Y = _mm256_insertf128_ps(_mm256_setzero_ps(), Yhi, 1);
Y = _mm256_insertf128_ps(Y, Ylo, 0);
return Y;
}
#endif
static float celt_exp(float x)
{
float out[8];
__m256 X, Y;
X = _mm256_set1_ps(x);
Y = exp8_approx(X);
_mm256_storeu_ps(out, Y);
return out[0];
}
/* Elementwise y[i] = exp(x[i]) using the vectorized approximation.
   NOTE(review): despite the name, no normalization by the sum is done
   here -- presumably the caller divides by the total; confirm at call
   sites before relying on this as a full softmax. */
static void softmax(float *y, const float *x, int N)
{
int i;
/* Vector main loop: 8 elements per iteration. */
for (i=0;i<N-7;i+=8)
{
__m256 X, Y;
X = _mm256_loadu_ps(&x[i]);
Y = exp8_approx(X);
_mm256_storeu_ps(&y[i], Y);
}
/* Scalar tail for the remaining N%8 elements. */
for (;i<N;i++)
y[i] = celt_exp(x[i]);
}
/* Elementwise tanh: y[i] = (e-1)/(e+1) with e = exp(2*x[i]).
   The vector path divides via _mm256_rcp_ps, an approximate reciprocal,
   so results are close to -- but not bit-identical with -- the scalar
   tail, which uses a true division. */
static void vec_tanh(float *y, const float *x, int N)
{
int i;
/* Vector main loop: 8 elements per iteration. */
for (i=0;i<N-7;i+=8)
{
const __m256 two = _mm256_set1_ps(2.f);
const __m256 one = _mm256_set1_ps(1.f);
__m256 X, Y;
X = _mm256_loadu_ps(&x[i]);
X = _mm256_mul_ps(X, two);
Y = exp8_approx(X);
/* tanh(x) = (exp(2x)-1) / (exp(2x)+1), reciprocal approximated. */
Y = _mm256_mul_ps(_mm256_sub_ps(Y, one), _mm256_rcp_ps(_mm256_add_ps(Y, one)));
_mm256_storeu_ps(&y[i], Y);
}
/* Scalar tail for the remaining N%8 elements. */
for (;i<N;i++)
{
float ex2;
ex2 = celt_exp(2*x[i]);
y[i] = (ex2-1)/(ex2+1);
}
}
static void vec_sigmoid(float *y, const float *x, int N)
{
int i;
for (i=0;i<N-7;i+=8)
{
const __m256 one = _mm256_set1_ps(1.f);
__m256 X, Y;
X = _mm256_loadu_ps(&x[i]);
Y = exp8_approx(X);
Y = _mm256_mul_ps(Y, _mm256_rcp_ps(_mm256_add_ps(Y, one)));
_mm256_storeu_ps(&y[i], Y);
}
for (;i<N;i++)
{
float ex;
ex = celt_exp(x[i]);
y[i] = (ex)/(ex+1);
}
}
/* Dense matrix-vector multiply-accumulate: out[i] += sum_j W(j,i) * x[j],
   processed 16 output rows at a time with two 8-lane FMA accumulators.
   The weight for column j and output row i is read at weights[j*col_stride+i],
   i.e. columns are laid out contiguously with stride col_stride.
   Assumes rows is a multiple of 16 -- TODO confirm against callers. */
static void sgemv_accum16(float *out, const float *weights, int rows, int cols, int col_stride, const float *x)
{
// #pragma omp parallel for
for ( int i = 0; i < rows; i += 16 )
{
float *y = &out[i];
/* Load the 16 running accumulators for this strip of output rows. */
__m256 vy0 = _mm256_loadu_ps(&y[0]);
__m256 vy8 = _mm256_loadu_ps(&y[8]);
for ( int j = 0; j < cols; j++ )
{
int weights_id = j*col_stride + i;
/* Broadcast x[j] and FMA it against 16 weights at once. */
__m256 vxj = _mm256_broadcast_ss(&x[j]);
__m256 vw0 = _mm256_loadu_ps(&weights[weights_id]);
__m256 vw8 = _mm256_loadu_ps(&weights[weights_id + 8]);
vy0 = _mm256_fmadd_ps(vw0, vxj, vy0);
vy8 = _mm256_fmadd_ps(vw8, vxj, vy8);
}
_mm256_storeu_ps (&y[0], vy0);
_mm256_storeu_ps (&y[8], vy8);
}
}
/* Sparse block variant of sgemv_accum16().  The sparsity pattern is encoded
   in idx with three ints per 16-row output block b (b = i/16):
     idx[3b]   -- number of non-zero columns touching this block,
     idx[3b+1] -- starting offset of this block's weights (16 floats per column),
     idx[3b+2] -- offset within idx[] where this block's column indices start.
   Assumes rows is a multiple of 16 -- TODO confirm against callers. */
static void sparse_sgemv_accum16(float *out, const float *weights, int rows, const int *idx, const float *x)
{
#pragma omp parallel for
for ( int i = 0; i < rows; i += 16 ) {
int tmp_i = i / 16;
int offset_w = idx[3*tmp_i + 1];
int offset_idx = idx[3*tmp_i + 2];
float *y = &out[i];
/* Load the 16 running accumulators for this block. */
__m256 vy0 = _mm256_loadu_ps(&y[0]);
__m256 vy8 = _mm256_loadu_ps(&y[8]);
for ( int j = 0; j < idx[3*tmp_i]; j++ ) {
/* id is the column index of the j-th non-zero column for this block. */
int id = idx[offset_idx + j];
__m256 vxj = _mm256_broadcast_ss(&x[id]);
__m256 vw0 = _mm256_loadu_ps(&weights[offset_w]);
__m256 vw8 = _mm256_loadu_ps(&weights[offset_w+8]);
vy0 = _mm256_fmadd_ps(vw0, vxj, vy0);
vy8 = _mm256_fmadd_ps(vw8, vxj, vy8);
/* Weights are packed densely: advance 16 floats per consumed column. */
offset_w += 16;
}
_mm256_storeu_ps (&y[0], vy0);
_mm256_storeu_ps (&y[8], vy8);
}
}
|
bug_36720.c | // RUN: %libomp-compile-and-run
/*
Bugzilla: https://bugs.llvm.org/show_bug.cgi?id=36720
Assertion failure at kmp_runtime.cpp(1715): nthreads > 0.
OMP: Error #13: Assertion failure at kmp_runtime.cpp(1715).
The assertion fails even with OMP_NUM_THREADS=1. If the second task is removed,
everything runs to completion. If the "omp parallel for" directives are removed
from inside the tasks, once again everything runs fine.
*/
#define N 1024
/* Regression test for https://bugs.llvm.org/show_bug.cgi?id=36720: two
   sibling tasks, each opening a nested "parallel for" region, used to trip
   the "nthreads > 0" assertion in kmp_runtime.cpp.  The structure below is
   deliberate (see the file header): removing the second task, or the
   parallel-for directives, made the failure disappear, so keep both.
   Success == the program simply runs to completion and returns 0. */
int main() {
#pragma omp task
{
int i;
#pragma omp parallel for
for (i = 0; i < N; i++)
(void)0;
}
/* Second, structurally identical task -- required to reproduce the bug. */
#pragma omp task
{
int i;
#pragma omp parallel for
for (i = 0; i < N; ++i)
(void)0;
}
#pragma omp taskwait
return 0;
}
|
band.h | // Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file band.h
*
* \brief Contains declaration and partial implementation of sirius::Band class.
*/
#ifndef __BAND_H__
#define __BAND_H__
#include "periodic_function.h"
#include "k_point_set.h"
#include "Hamiltonian/local_operator.hpp"
#include "non_local_operator.h"
#include "hubbard.hpp"
#include "Hamiltonian.h"
namespace sirius {
// TODO: Band problem is a mess and needs more formal organization. We have different basis functions.
// We can do first- and second-variation or a full variation. We can do iterative or exact diagonalization.
// This has to be organized.
// solve_for_kset should be the the main entry point (rename to solve() and pass K_point_set)
// solve() ---> solve_fplapw(K_point) -> |--> second_variation() --> fv_diag() : exact or iterative : sv_diag()
// | |--> single_variation() : exact or iterative
// |
// |
// \--> solve_pppw(K_point) -> |--> davidson()
// |--> rmm_diis()
// |--> chebyshev()
// |--> exact()
/// Setup and solve the eigen value problem.
class Band
{
private:
/// Simulation context.
Simulation_context& ctx_;
/// Alias for the unit cell.
Unit_cell& unit_cell_;
/// BLACS grid for distributed linear algebra operations.
BLACS_grid const& blacs_grid_;
/// Solve the band diagonalization problem with single (full) variation.
inline int solve_with_single_variation(K_point& kp__, Hamiltonian& hamiltonian__) const;
/// Solve the band diagonalization problem with second variation approach.
/** This is only used by the FP-LAPW method. */
inline void solve_with_second_variation(K_point& kp__, Hamiltonian& hamiltonian__) const;
/// Solve the first-variational (non-magnetic) problem with exact diagonalization.
/** This is only used by the LAPW method. */
inline void diag_fv_exact(K_point* kp__, Hamiltonian& hamiltonian__) const;
/// Solve the first-variational (non-magnetic) problem with iterative Davidson diagonalization.
inline void diag_fv_davidson(K_point* kp__, Hamiltonian& hamiltonian__) const;
/// Get singular components of the LAPW overlap matrix.
/** Singular components are the eigen-vectors with a very small eigen-value. */
inline void get_singular_components(K_point* kp__, Hamiltonian& H__) const;
/// Exact (not iterative) diagonalization of the Hamiltonian.
template <typename T>
inline void diag_pseudo_potential_exact(K_point* kp__, int ispn__, Hamiltonian& H__) const;
/// Iterative Davidson diagonalization.
template <typename T>
inline int diag_pseudo_potential_davidson(K_point* kp__, Hamiltonian& H__) const;
/// RMM-DIIS diagonalization.
template <typename T>
inline void diag_pseudo_potential_rmm_diis(K_point* kp__, int ispn__, Hamiltonian& H__) const;
/// Chebyshev-filtered diagonalization.
template <typename T>
inline void
diag_pseudo_potential_chebyshev(K_point* kp__, int ispn__, Hamiltonian& H__, P_operator<T>& p_op__) const;
/// Auxiliary function used internally by residuals() function.
inline mdarray<double, 1> residuals_aux(K_point* kp__,
int ispn__,
int num_bands__,
std::vector<double>& eval__,
Wave_functions& hpsi__,
Wave_functions& opsi__,
Wave_functions& res__,
mdarray<double, 2>& h_diag__,
mdarray<double, 1>& o_diag__) const;
/// Compute residuals.
template <typename T>
inline int residuals(K_point* kp__,
int ispn__,
int N__,
int num_bands__,
std::vector<double>& eval__,
std::vector<double>& eval_old__,
dmatrix<T>& evec__,
Wave_functions& hphi__,
Wave_functions& ophi__,
Wave_functions& hpsi__,
Wave_functions& opsi__,
Wave_functions& res__,
mdarray<double, 2>& h_diag__,
mdarray<double, 1>& o_diag__) const;
/** Compute \f$ O_{ii'} = \langle \phi_i | \hat O | \phi_{i'} \rangle \f$ operator matrix
* for the subspace spanned by the wave-functions \f$ \phi_i \f$. The matrix is always returned
* in the CPU pointer because most of the standard math libraries start from the CPU.
*
* The first N__ x N__ block is copied from mtrx_old__ (if provided); only the
* new N__ x n__ strip is computed via inner(), and the conjugate-transposed
* lower strip is restored afterwards. */
template <typename T>
inline void set_subspace_mtrx(int N__,
int n__,
Wave_functions& phi__,
Wave_functions& op_phi__,
dmatrix<T>& mtrx__,
dmatrix<T>& mtrx_old__) const
{
PROFILE("sirius::Band::set_subspace_mtrx");
assert(n__ != 0);
if (mtrx_old__.size()) {
assert(&mtrx__.blacs_grid() == &mtrx_old__.blacs_grid());
}
/* copy old N x N distributed matrix */
if (N__ > 0) {
splindex<block_cyclic> spl_row(N__, mtrx__.blacs_grid().num_ranks_row(), mtrx__.blacs_grid().rank_row(),
mtrx__.bs_row());
splindex<block_cyclic> spl_col(N__, mtrx__.blacs_grid().num_ranks_col(), mtrx__.blacs_grid().rank_col(),
mtrx__.bs_col());
#pragma omp parallel for schedule(static)
for (int i = 0; i < spl_col.local_size(); i++) {
std::copy(&mtrx_old__(0, i), &mtrx_old__(0, i) + spl_row.local_size(), &mtrx__(0, i));
}
/* optional debug checksum of the copied block, reduced over the grid */
if (ctx_.control().print_checksum_) {
double_complex cs(0, 0);
for (int i = 0; i < spl_col.local_size(); i++) {
for (int j = 0; j < spl_row.local_size(); j++) {
cs += mtrx__(j, i);
}
}
mtrx__.blacs_grid().comm().allreduce(&cs, 1);
if (ctx_.comm_band().rank() == 0) {
print_checksum("subspace_mtrx_old", cs);
}
}
}
/* <{phi,phi_new}|Op|phi_new> */
inner(ctx_.processing_unit(), (ctx_.num_mag_dims() == 3) ? 2 : 0, phi__, 0, N__ + n__, op_phi__, N__, n__,
mtrx__, 0, N__);
/* restore lower part */
if (N__ > 0) {
if (mtrx__.blacs_grid().comm().size() == 1) {
/* serial case: fill the lower strip with the conjugate of the upper one */
#pragma omp parallel for
for (int i = 0; i < N__; i++) {
for (int j = N__; j < N__ + n__; j++) {
mtrx__(j, i) = type_wrapper<T>::bypass(std::conj(mtrx__(i, j)));
}
}
} else {
#ifdef __SCALAPACK
linalg<CPU>::tranc(n__, N__, mtrx__, 0, N__, mtrx__, N__, 0);
#else
TERMINATE_NO_SCALAPACK
#endif
}
}
if (ctx_.control().print_checksum_) {
splindex<block_cyclic> spl_row(N__ + n__, mtrx__.blacs_grid().num_ranks_row(),
mtrx__.blacs_grid().rank_row(), mtrx__.bs_row());
splindex<block_cyclic> spl_col(N__ + n__, mtrx__.blacs_grid().num_ranks_col(),
mtrx__.blacs_grid().rank_col(), mtrx__.bs_col());
double_complex cs(0, 0);
for (int i = 0; i < spl_col.local_size(); i++) {
for (int j = 0; j < spl_row.local_size(); j++) {
cs += mtrx__(j, i);
}
}
mtrx__.blacs_grid().comm().allreduce(&cs, 1);
if (ctx_.comm_band().rank() == 0) {
print_checksum("subspace_mtrx", cs);
}
}
/* kill any numerical noise */
mtrx__.make_real_diag(N__ + n__);
/* save new matrix */
if (mtrx_old__.size()) {
splindex<block_cyclic> spl_row(N__ + n__, mtrx__.blacs_grid().num_ranks_row(),
mtrx__.blacs_grid().rank_row(), mtrx__.bs_row());
splindex<block_cyclic> spl_col(N__ + n__, mtrx__.blacs_grid().num_ranks_col(),
mtrx__.blacs_grid().rank_col(), mtrx__.bs_col());
#pragma omp parallel for schedule(static)
for (int i = 0; i < spl_col.local_size(); i++) {
std::copy(&mtrx__(0, i), &mtrx__(0, i) + spl_row.local_size(), &mtrx_old__(0, i));
}
}
}
/// Diagonalize a pseudo-potential Hamiltonian.
/** Dispatches on the solver type from the iterative_solver input section
* ("exact", "davidson", "rmm-diis" or "chebyshev").  Returns the iteration
* count reported by the Davidson solver; 0 for the other solvers. */
template <typename T>
int diag_pseudo_potential(K_point* kp__, Hamiltonian& H__) const
{
PROFILE("sirius::Band::diag_pseudo_potential");
H__.local_op().prepare(kp__->gkvec_partition());
ctx_.fft_coarse().prepare(kp__->gkvec_partition());
int niter{0};
auto& itso = ctx_.iterative_solver_input();
if (itso.type_ == "exact") {
/* non-collinear case (num_mag_dims == 3) is not implemented: STOP() */
if (ctx_.num_mag_dims() != 3) {
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++) {
diag_pseudo_potential_exact<double_complex>(kp__, ispn, H__);
}
} else {
STOP();
}
} else if (itso.type_ == "davidson") {
niter = diag_pseudo_potential_davidson<T>(kp__, H__);
} else if (itso.type_ == "rmm-diis") {
if (ctx_.num_mag_dims() != 3) {
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++) {
diag_pseudo_potential_rmm_diis<T>(kp__, ispn, H__);
}
} else {
STOP();
}
} else if (itso.type_ == "chebyshev") {
P_operator<T> p_op(ctx_, kp__->p_mtrx());
if (ctx_.num_mag_dims() != 3) {
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++) {
diag_pseudo_potential_chebyshev<T>(kp__, ispn, H__, p_op);
}
} else {
STOP();
}
} else {
TERMINATE("unknown iterative solver type");
}
/* check residuals */
if (ctx_.control().verification_ >= 1) {
check_residuals<T>(kp__, H__);
}
ctx_.fft_coarse().dismiss();
return niter;
}
/// Compute and print the l2-norm of H|psi> - E*S|psi> for every band (verification aid).
template <typename T>
void check_residuals(K_point* kp__,
Hamiltonian& H__) const
{
if (kp__->comm().rank() == 0) {
printf("checking residuals\n");
}
const bool nc_mag = (ctx_.num_mag_dims() == 3);
const int num_sc = nc_mag ? 2 : 1;
auto& psi = kp__->spinor_wave_functions();
Wave_functions hpsi(kp__->gkvec_partition(), ctx_.num_bands(), num_sc);
Wave_functions spsi(kp__->gkvec_partition(), ctx_.num_bands(), num_sc);
Wave_functions res(kp__->gkvec_partition(), ctx_.num_bands(), num_sc);
/* compute residuals */
for (int ispin_step = 0; ispin_step < ctx_.num_spin_dims(); ispin_step++) {
if (nc_mag) {
/* apply Hamiltonian and S operators to the wave-functions */
H__.apply_h_s<T>(kp__, 2, 0, ctx_.num_bands(), psi, hpsi, spsi);
} else {
Wave_functions phi(&psi.pw_coeffs(ispin_step).prime(0, 0), kp__->gkvec_partition(), ctx_.num_bands(), 1);
/* apply Hamiltonian and S operators to the wave-functions */
H__.apply_h_s<T>(kp__, ispin_step, 0, ctx_.num_bands(), phi, hpsi, spsi);
}
/* res = H|psi> - E*S|psi> per band and spin component */
for (int ispn = 0; ispn < num_sc; ispn++) {
#pragma omp parallel for schedule(static)
for (int j = 0; j < ctx_.num_bands(); j++) {
for (int ig = 0; ig < kp__->num_gkvec_loc(); ig++) {
res.pw_coeffs(ispn).prime(ig, j) = hpsi.pw_coeffs(ispn).prime(ig, j) -
spsi.pw_coeffs(ispn).prime(ig, j) *
kp__->band_energy(j, ispin_step);
}
}
}
/* get the norm */
auto l2norm = res.l2norm(ctx_.processing_unit(), nc_mag ? 2 : 0, ctx_.num_bands());
if (kp__->comm().rank() == 0) {
for (int j = 0; j < ctx_.num_bands(); j++) {
printf("band: %3i, residual l2norm: %18.12f\n", j, l2norm[j]);
}
}
}
}
public:
/// Constructor
Band(Simulation_context& ctx__)
: ctx_(ctx__)
, unit_cell_(ctx__.unit_cell())
, blacs_grid_(ctx__.blacs_grid())
{
}
/// Solve second-variational problem.
inline void diag_sv(K_point* kp, Hamiltonian& hamiltonian__) const;
/// Solve \f$ \hat H \psi = E \psi \f$ and find eigen-states of the Hamiltonian.
inline void solve_for_kset(K_point_set& kset__, Hamiltonian& hamiltonian__, bool precompute__) const;
/// Initialize the subspace for the entire k-point set.
inline void initialize_subspace(K_point_set& kset__, Hamiltonian& hamiltonian__) const;
/// Initialize the wave-functions subspace.
template <typename T>
inline void initialize_subspace(K_point* kp__, Hamiltonian& hamiltonian__, int num_ao__) const;
/// Return a mutable reference to a process-wide eigen-value-problem work counter.
static double& evp_work_count()
{
static double evp_work_count_{0};
return evp_work_count_;
}
};
#include "Band/residuals.hpp"
#include "Band/diag_full_potential.hpp"
#include "Band/diag_pseudo_potential.hpp"
#include "Band/initialize_subspace.hpp"
#include "Band/solve.hpp"
}
#endif // __BAND_H__
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/opencl.h"
#include "magick/opencl-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resource_.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/token.h"
#include "magick/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extracts the 'mean' from the image and adjusts the image
% so that its gamma is set appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image)
% MagickBooleanType AutoGammaImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o channel: The channels to auto-level. If the special 'SyncChannels'
% flag is set all given channels is adjusted in the same way using the
% mean average of those channels.
%
*/
/* Convenience wrapper: auto-gamma the default channel set of the image. */
MagickExport MagickBooleanType AutoGammaImage(Image *image)
{
return(AutoGammaImageChannel(image,DefaultChannels));
}
MagickExport MagickBooleanType AutoGammaImageChannel(Image *image,
const ChannelType channel)
{
double
gamma,
mean,
logmean,
sans;
MagickStatusType
status;
logmean=log(0.5);
if ((channel & SyncChannels) != 0)
{
/*
Apply gamma correction equally accross all given channels
*/
(void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception);
gamma=log(mean*QuantumScale)/logmean;
return(LevelImageChannel(image,channel,0.0,(double) QuantumRange,gamma));
}
/*
Auto-gamma each channel separateally
*/
status = MagickTrue;
if ((channel & RedChannel) != 0)
{
(void) GetImageChannelMean(image,RedChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,RedChannel,0.0,(double) QuantumRange,
gamma);
}
if ((channel & GreenChannel) != 0)
{
(void) GetImageChannelMean(image,GreenChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,GreenChannel,0.0,(double) QuantumRange,
gamma);
}
if ((channel & BlueChannel) != 0)
{
(void) GetImageChannelMean(image,BlueChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,BlueChannel,0.0,(double) QuantumRange,
gamma);
}
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
{
(void) GetImageChannelMean(image,OpacityChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,OpacityChannel,0.0,(double) QuantumRange,
gamma);
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
(void) GetImageChannelMean(image,IndexChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,IndexChannel,0.0,(double) QuantumRange,
gamma);
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image)
% MagickBooleanType AutoLevelImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o channel: The channels to auto-level. If the special 'SyncChannels'
% flag is set the min/max/mean value of all given channels is used for
% all given channels, to all channels in the same way.
%
*/
/* Convenience wrapper: auto-level the default channel set of the image. */
MagickExport MagickBooleanType AutoLevelImage(Image *image)
{
return(AutoLevelImageChannel(image,DefaultChannels));
}
MagickExport MagickBooleanType AutoLevelImageChannel(Image *image,
const ChannelType channel)
{
/*
Convenience method for a min/max histogram stretch.
The 0.0,0.0 arguments request a stretch to the exact channel minimum and
maximum with no percentage of pixels clipped at either end.
*/
return(MinMaxStretchImage(image,channel,0.0,0.0));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast)
% MagickBooleanType BrightnessContrastImageChannel(Image *image,
% const ChannelType channel,const double brightness,
% const double contrast)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
*/
/* Convenience wrapper: adjust brightness/contrast on the default channels. */
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast)
{
  return(BrightnessContrastImageChannel(image,DefaultChannels,brightness,
    contrast));
}
/* Convert brightness/contrast percentages (-100..100 each) into the slope
   and intercept of a linear transfer function, then apply it via a
   degree-1 polynomial function evaluation on the selected channels. */
MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image,
const ChannelType channel,const double brightness,const double contrast)
{
#define BrightnessContastImageTag "BrightnessContast/Image"
double
alpha,
intercept,
coefficients[2],
slope;
MagickBooleanType
status;
/*
Compute slope and intercept.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
alpha=contrast;
/* Map contrast to a slope via tan(): contrast 0 gives slope 1 (identity);
   the slope is clamped at 0 so the transfer function never inverts. */
slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
if (slope < 0.0)
slope=0.0;
/* Pick the intercept so brightness shifts the curve while keeping the
   midpoint behavior consistent with the chosen slope. */
intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
coefficients[0]=slope;
coefficients[1]=intercept;
/* Apply y = slope*x + intercept as a polynomial function per pixel. */
status=FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients,
&image->exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
*/
/* Parse an ASC CDL Color Correction Collection (XML), build a per-channel
   lookup table from the slope/offset/power values, and apply it -- blended
   with luma by the saturation factor -- to the colormap (if any) and to
   every pixel.  Returns MagickFalse on a missing/unparsable collection. */
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
const char *color_correction_collection)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"
/* Per-channel SOP (slope/offset/power) values from the CCC file. */
typedef struct _Correction
{
double
slope,
offset,
power;
} Correction;
typedef struct _ColorCorrection
{
Correction
red,
green,
blue;
double
saturation;
} ColorCorrection;
CacheView
*image_view;
char
token[MaxTextExtent];
ColorCorrection
color_correction;
const char
*content,
*p;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelPacket
*cdl_map;
register ssize_t
i;
ssize_t
y;
XMLTreeInfo
*cc,
*ccc,
*sat,
*sop;
/*
Allocate and initialize cdl maps.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (color_correction_collection == (const char *) NULL)
return(MagickFalse);
exception=(&image->exception);
ccc=NewXMLTree((const char *) color_correction_collection,&image->exception);
if (ccc == (XMLTreeInfo *) NULL)
return(MagickFalse);
cc=GetXMLTreeChild(ccc,"ColorCorrection");
if (cc == (XMLTreeInfo *) NULL)
{
ccc=DestroyXMLTree(ccc);
return(MagickFalse);
}
/* Defaults: identity slope/power, zero offset and zero saturation. */
color_correction.red.slope=1.0;
color_correction.red.offset=0.0;
color_correction.red.power=1.0;
color_correction.green.slope=1.0;
color_correction.green.offset=0.0;
color_correction.green.power=1.0;
color_correction.blue.slope=1.0;
color_correction.blue.offset=0.0;
color_correction.blue.power=1.0;
color_correction.saturation=0.0;
/* Parse <SOPNode>: each of Slope/Offset/Power holds an R G B triplet. */
sop=GetXMLTreeChild(cc,"SOPNode");
if (sop != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*offset,
*power,
*slope;
slope=GetXMLTreeChild(sop,"Slope");
if (slope != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(slope);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
switch (i)
{
case 0:
{
color_correction.red.slope=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.slope=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.slope=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
offset=GetXMLTreeChild(sop,"Offset");
if (offset != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(offset);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
switch (i)
{
case 0:
{
color_correction.red.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 1:
{
color_correction.green.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.offset=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
power=GetXMLTreeChild(sop,"Power");
if (power != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(power);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
switch (i)
{
case 0:
{
color_correction.red.power=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.power=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.power=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
}
/* Parse <SATNode>: a single saturation scalar. */
sat=GetXMLTreeChild(cc,"SATNode");
if (sat != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*saturation;
saturation=GetXMLTreeChild(sat,"Saturation");
if (saturation != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(saturation);
p=(const char *) content;
GetNextToken(p,&p,MaxTextExtent,token);
color_correction.saturation=StringToDouble(token,(char **) NULL);
}
}
ccc=DestroyXMLTree(ccc);
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" Color Correction Collection:");
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.slope: %g",color_correction.red.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.offset: %g",color_correction.red.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.power: %g",color_correction.red.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.slope: %g",color_correction.green.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.offset: %g",color_correction.green.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.power: %g",color_correction.green.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.slope: %g",color_correction.blue.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.offset: %g",color_correction.blue.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.power: %g",color_correction.blue.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.saturation: %g",color_correction.saturation);
}
cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
if (cdl_map == (PixelPacket *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/* Build a per-channel LUT: v -> pow(slope*v + offset, power). */
for (i=0; i <= (ssize_t) MaxMap; i++)
{
cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
color_correction.red.offset,color_correction.red.power)))));
cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
color_correction.green.offset,color_correction.green.power)))));
cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
color_correction.blue.offset,color_correction.blue.power)))));
}
if (image->storage_class == PseudoClass)
{
/*
Apply transfer function to colormap.
NOTE(review): unlike the pixel loop below, the saturation term here is
not parenthesized as (cdl-luma); as written the expression reduces to
saturation*cdl.  Confirm whether this asymmetry is intentional.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
double
luma;
luma=0.212656*image->colormap[i].red+0.715158*image->colormap[i].green+
0.072186*image->colormap[i].blue;
image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation*
cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma);
image->colormap[i].green=ClampToQuantum(luma+
color_correction.saturation*cdl_map[ScaleQuantumToMap(
image->colormap[i].green)].green-luma);
image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation*
cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma);
}
}
/*
Apply transfer function to image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
luma;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
/* Blend each LUT-corrected channel with luma by the saturation factor. */
for (x=0; x < (ssize_t) image->columns; x++)
{
luma=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+
0.072186*GetPixelBlue(q);
SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma)));
SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma)));
SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma)));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will affect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image)
% MagickBooleanType ClutImageChannel(Image *image,
% const ChannelType channel,Image *clut_image)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: apply the color lookup table to the default
    channel set.
  */
  status=ClutImageChannel(image,DefaultChannels,clut_image);
  return(status);
}
MagickExport MagickBooleanType ClutImageChannel(Image *image,
  const ChannelType channel,const Image *clut_image)
{
#define ClutImageTag "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    *clut_map;

  register ssize_t
    i;

  ssize_t
    adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  exception=(&image->exception);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    A gray image indexed through a colored CLUT gains color: promote its
    colorspace before applying the table.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*clut_map));
  if (clut_map == (MagickPixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image: sample the CLUT along its diagonal into a MaxMap+1 entry
    lookup table (clut_map), interpolating between CLUT pixels.
  */
  status=MagickTrue;
  progress=0;
  /* Integer interpolation samples cell centers; otherwise shrink the range
     by one so the last sample does not fall off the image edge. */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  clut_view=AcquireAuthenticCacheView(clut_image,exception);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetMagickPixelPacket(clut_image,clut_map+i);
    status=InterpolateMagickPixelPacket(clut_image,clut_view,
      UndefinedInterpolatePixel,(double) i*(clut_image->columns-adjust)/MaxMap,
      (double) i*(clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
    if (status == MagickFalse)
      break;
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Snapshot the pixel before any channel is replaced, so the alpha
         handling below can use the original intensity. */
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixelRed(clut_map+
          ScaleQuantumToMap(GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixelGreen(clut_map+
          ScaleQuantumToMap(GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixelBlue(clut_map+
          ScaleQuantumToMap(GetPixelBlue(q))));
      if ((channel & OpacityChannel) != 0)
        {
          /* Three cases (see the function's doc header): CLUT without matte
             recolors alpha from intensity; image without matte gets alpha
             from its own intensity; both with matte index alpha directly. */
          if (clut_image->matte == MagickFalse)
            SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+
              ScaleQuantumToMap((Quantum) GetPixelAlpha(q))));
          else
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,ClampPixelOpacity(clut_map+
                ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel))));
            else
              SetPixelOpacity(q,ClampPixelOpacity(
                clut_map+ScaleQuantumToMap(GetPixelOpacity(q))));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t)
          GetPixelIndex(indexes+x))->index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map);
  /* Enable the image's alpha channel if the CLUT carried transparency. */
  if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to a MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
*/
static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    curve,
    hue,
    saturation;

  /*
    Enhance contrast: dark colors become darker, light colors become
    lighter.  The pixel is mapped into HSB space and its brightness is
    pulled toward (sign > 0) or away from (sign < 0) a sinusoidal S-curve.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  /* S-curve target in [0,1], centered on brightness 0.5. */
  curve=0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0);
  brightness+=0.5*sign*(curve-brightness);
  /* Clamp to the valid HSB brightness range. */
  if (brightness < 0.0)
    brightness=0.0;
  else
    if (brightness > 1.0)
      brightness=1.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen)
{
#define ContrastImageTag "Contrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* sign selects the direction of the HSB curve: +1 sharpens (more
     contrast), -1 dulls (less contrast). */
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
        Contrast(sign,&image->colormap[i].red,&image->colormap[i].green,
          &image->colormap[i].blue);
    }
  /*
    Contrast enhance image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the accelerated path first; fall through to the CPU loop only if
     it declines (returns MagickFalse). */
  status=AccelerateContrastImage(image,sharpen,&image->exception);
  if (status != MagickFalse)
    return status;
#endif
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      blue,
      green,
      red;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Adjust each pixel in place via the Contrast() transfer curve. */
      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by `stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% `enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
% MagickBooleanType ContrastStretchImage(Image *image,
% const char *levels)
%      MagickBooleanType ContrastStretchImageChannel(Image *image,
%        const ChannelType channel,const double black_point,
%        const double white_point)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o levels: Specify the levels where the black and white points have the
% range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const char *levels)
{
  double
    black_point,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels: "black[xwhite][%]".  The black and white points are pixel
    counts in the range 0..columns*rows (see the doc header above); they are
    compared against cumulative histogram counts in
    ContrastStretchImageChannel().
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) image->columns*image->rows;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  if ((flags & PercentValue) != 0)
    {
      /*
        Percentages are relative to the total pixel count, not the quantum
        range: scaling by QuantumRange here would produce thresholds in the
        wrong units for the histogram-count comparisons downstream.
      */
      black_point*=(double) image->columns*image->rows/100.0;
      white_point*=(double) image->columns*image->rows/100.0;
    }
  /* With no explicit white point, mirror the black point from the top. */
  if ((flags & SigmaValue) == 0)
    white_point=(double) image->columns*image->rows-black_point;
  status=ContrastStretchImageChannel(image,DefaultChannels,black_point,
    white_point);
  return(status);
}
MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point)
{
#define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    intensity;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    black,
    *histogram,
    white;

  QuantumPixelPacket
    *stretch_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception=(&image->exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT) && 0
  /* Call OpenCL version */
  /* NOTE(review): the "&& 0" disables this accelerated path — presumably
     deliberate; confirm before re-enabling. */
  status=AccelerateContrastStretchImageChannel(image,channel,black_point,
    white_point,&image->exception);
  if (status != MagickFalse)
    return status;
#endif
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  stretch_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*stretch_map));
  if ((histogram == (MagickPixelPacket *) NULL) ||
      (stretch_map == (QuantumPixelPacket *) NULL))
    {
      if (stretch_map != (QuantumPixelPacket *) NULL)
        stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map);
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.  Each MagickPixelPacket field is used as a per-channel
    counter, indexed by the pixel's map-scaled value.
  */
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        /* Shadows the function-scope double `intensity` on purpose:
           this one is the clamped per-pixel gray value. */
        Quantum
          intensity;

        intensity=ClampToQuantum(GetPixelIntensity(image,p));
        histogram[ScaleQuantumToMap(intensity)].red++;
        histogram[ScaleQuantumToMap(intensity)].green++;
        histogram[ScaleQuantumToMap(intensity)].blue++;
        histogram[ScaleQuantumToMap(intensity)].index++;
        p++;
      }
    else
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
        if ((channel & GreenChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
        if ((channel & BlueChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
        if ((channel & OpacityChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
        p++;
      }
  }
  /*
    Find the histogram boundaries by locating the black/white levels:
    scan up from 0 accumulating counts until black_point pixels are passed,
    and down from MaxMap until (total - white_point) pixels are passed.
  */
  black.red=0.0;
  white.red=MaxRange(QuantumRange);
  if ((channel & RedChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].red;
        if (intensity > black_point)
          break;
      }
      black.red=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].red;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.red=(MagickRealType) i;
    }
  black.green=0.0;
  white.green=MaxRange(QuantumRange);
  if ((channel & GreenChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].green;
        if (intensity > black_point)
          break;
      }
      black.green=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].green;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.green=(MagickRealType) i;
    }
  black.blue=0.0;
  white.blue=MaxRange(QuantumRange);
  if ((channel & BlueChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].blue;
        if (intensity > black_point)
          break;
      }
      black.blue=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].blue;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.blue=(MagickRealType) i;
    }
  black.opacity=0.0;
  white.opacity=MaxRange(QuantumRange);
  if ((channel & OpacityChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].opacity;
        if (intensity > black_point)
          break;
      }
      black.opacity=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].opacity;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.opacity=(MagickRealType) i;
    }
  black.index=0.0;
  white.index=MaxRange(QuantumRange);
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].index;
        if (intensity > black_point)
          break;
      }
      black.index=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].index;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.index=(MagickRealType) i;
    }
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping: values
    below black clamp to 0, above white clamp to QuantumRange, and the
    interval in between is mapped linearly.
  */
  (void) memset(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & RedChannel) != 0)
      {
        if (i < (ssize_t) black.red)
          stretch_map[i].red=(Quantum) 0;
        else
          if (i > (ssize_t) white.red)
            stretch_map[i].red=QuantumRange;
          else
            if (black.red != white.red)
              stretch_map[i].red=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.red)/(white.red-black.red)));
      }
    if ((channel & GreenChannel) != 0)
      {
        if (i < (ssize_t) black.green)
          stretch_map[i].green=0;
        else
          if (i > (ssize_t) white.green)
            stretch_map[i].green=QuantumRange;
          else
            if (black.green != white.green)
              stretch_map[i].green=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.green)/(white.green-black.green)));
      }
    if ((channel & BlueChannel) != 0)
      {
        if (i < (ssize_t) black.blue)
          stretch_map[i].blue=0;
        else
          if (i > (ssize_t) white.blue)
            stretch_map[i].blue= QuantumRange;
          else
            if (black.blue != white.blue)
              stretch_map[i].blue=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.blue)/(white.blue-black.blue)));
      }
    if ((channel & OpacityChannel) != 0)
      {
        if (i < (ssize_t) black.opacity)
          stretch_map[i].opacity=0;
        else
          if (i > (ssize_t) white.opacity)
            stretch_map[i].opacity=QuantumRange;
          else
            if (black.opacity != white.opacity)
              stretch_map[i].opacity=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.opacity)/(white.opacity-black.opacity)));
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      {
        if (i < (ssize_t) black.index)
          stretch_map[i].index=0;
        else
          if (i > (ssize_t) white.index)
            stretch_map[i].index=QuantumRange;
          else
            if (black.index != white.index)
              stretch_map[i].index=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.index)/(white.index-black.index)));
      }
  }
  /*
    Stretch the image.  Opacity/index stretching cannot be expressed through
    a colormap, so force DirectClass in those cases.
  */
  if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace)))
    image->storage_class=DirectClass;
  if (image->storage_class == PseudoClass)
    {
      /*
        Stretch colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          {
            if (black.red != white.red)
              image->colormap[i].red=stretch_map[
                ScaleQuantumToMap(image->colormap[i].red)].red;
          }
        if ((channel & GreenChannel) != 0)
          {
            if (black.green != white.green)
              image->colormap[i].green=stretch_map[
                ScaleQuantumToMap(image->colormap[i].green)].green;
          }
        if ((channel & BlueChannel) != 0)
          {
            if (black.blue != white.blue)
              image->colormap[i].blue=stretch_map[
                ScaleQuantumToMap(image->colormap[i].blue)].blue;
          }
        if ((channel & OpacityChannel) != 0)
          {
            if (black.opacity != white.opacity)
              image->colormap[i].opacity=stretch_map[
                ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
          }
      }
    }
  /*
    Stretch image.  Reuses image_view acquired for the histogram pass.
  */
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          if (black.red != white.red)
            SetPixelRed(q,stretch_map[
              ScaleQuantumToMap(GetPixelRed(q))].red);
        }
      if ((channel & GreenChannel) != 0)
        {
          if (black.green != white.green)
            SetPixelGreen(q,stretch_map[
              ScaleQuantumToMap(GetPixelGreen(q))].green);
        }
      if ((channel & BlueChannel) != 0)
        {
          if (black.blue != white.blue)
            SetPixelBlue(q,stretch_map[
              ScaleQuantumToMap(GetPixelBlue(q))].blue);
        }
      if ((channel & OpacityChannel) != 0)
        {
          if (black.opacity != white.opacity)
            SetPixelOpacity(q,stretch_map[
              ScaleQuantumToMap(GetPixelOpacity(q))].opacity);
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if (black.index != white.index)
            SetPixelIndex(indexes+x,stretch_map[
              ScaleQuantumToMap(GetPixelIndex(indexes+x))].index);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ContrastStretchImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
/*
  Fold one neighbor pixel (at r) into the weighted average if its color
  distance from the center pixel is small enough; always advances r.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelOpacity(r)+pixel.opacity)/2.0; \
  distance=QuantumScale*((double) GetPixelOpacity(r)-pixel.opacity); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(r); \
      aggregate.green+=(weight)*GetPixelGreen(r); \
      aggregate.blue+=(weight)*GetPixelBlue(r); \
      aggregate.opacity+=(weight)*GetPixelOpacity(r); \
      total_weight+=(weight); \
    } \
  r++;
#define EnhanceImageTag "Enhance/Image"

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* The filter needs a full 5x5 neighborhood; tiny images are rejected. */
  if ((image->columns < 5) || (image->rows < 5))
    return((Image *) NULL);
  enhance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&enhance_image->exception);
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  (void) memset(&zero,0,sizeof(zero));
  image_view=AcquireAuthenticCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read another scan line.  The source window is 5 rows tall and 4
      columns wider than the image to cover the 5x5 neighborhood.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;

      MagickPixelPacket
        aggregate;

      PixelPacket
        pixel;

      register const PixelPacket
        *magick_restrict r;

      /*
        Compute weighted average of target pixel color components.
      */
      aggregate=zero;
      total_weight=0.0;
      /* Center pixel of the 5x5 window: 2 rows down, 2 columns right. */
      r=p+2*(image->columns+4)+2;
      pixel=(*r);
      /* Walk the window row by row with a fixed bell-shaped weight mask. */
      r=p;
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
        EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      if (total_weight > MagickEpsilon)
        {
          SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight);
          SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/total_weight);
          SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight);
          SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/
            total_weight);
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image)
% MagickBooleanType EqualizeImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: equalize the default channel set.
  */
  status=EqualizeImageChannel(image,DefaultChannels);
  return(status);
}
MagickExport MagickBooleanType EqualizeImageChannel(Image *image,
const ChannelType channel)
{
#define EqualizeImageTag "Equalize/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
black,
*histogram,
intensity,
*map,
white;
QuantumPixelPacket
*equalize_map;
register ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
exception=(&image->exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/* Call OpenCL version */
status=AccelerateEqualizeImage(image,channel,&image->exception);
if (status != MagickFalse)
return status;
#endif
/*
Allocate and initialize histogram arrays.
*/
equalize_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*equalize_map));
histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*histogram));
map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map));
if ((equalize_map == (QuantumPixelPacket *) NULL) ||
(histogram == (MagickPixelPacket *) NULL) ||
(map == (MagickPixelPacket *) NULL))
{
if (map != (MagickPixelPacket *) NULL)
map=(MagickPixelPacket *) RelinquishMagickMemory(map);
if (histogram != (MagickPixelPacket *) NULL)
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
if (equalize_map != (QuantumPixelPacket *) NULL)
equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(
equalize_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Form histogram.
*/
(void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
if ((channel & SyncChannels) != 0)
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType intensity=GetPixelIntensity(image,p);
histogram[ScaleQuantumToMap(ClampToQuantum(intensity))].red++;
p++;
}
else
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
if ((channel & GreenChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
if ((channel & BlueChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
if ((channel & OpacityChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
p++;
}
}
image_view=DestroyCacheView(image_view);
/*
Integrate the histogram to get the equalization map.
*/
(void) memset(&intensity,0,sizeof(intensity));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if ((channel & SyncChannels) != 0)
{
intensity.red+=histogram[i].red;
map[i]=intensity;
continue;
}
if ((channel & RedChannel) != 0)
intensity.red+=histogram[i].red;
if ((channel & GreenChannel) != 0)
intensity.green+=histogram[i].green;
if ((channel & BlueChannel) != 0)
intensity.blue+=histogram[i].blue;
if ((channel & OpacityChannel) != 0)
intensity.opacity+=histogram[i].opacity;
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
intensity.index+=histogram[i].index;
map[i]=intensity;
}
black=map[0];
white=map[(int) MaxMap];
(void) memset(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if ((channel & SyncChannels) != 0)
{
if (white.red != black.red)
equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].red-black.red))/(white.red-black.red)));
continue;
}
if (((channel & RedChannel) != 0) && (white.red != black.red))
equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].red-black.red))/(white.red-black.red)));
if (((channel & GreenChannel) != 0) && (white.green != black.green))
equalize_map[i].green=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].green-black.green))/(white.green-black.green)));
if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
equalize_map[i].blue=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].blue-black.blue))/(white.blue-black.blue)));
if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
equalize_map[i].opacity=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].opacity-black.opacity))/(white.opacity-black.opacity)));
if ((((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace)) &&
(white.index != black.index))
equalize_map[i].index=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].index-black.index))/(white.index-black.index)));
}
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
map=(MagickPixelPacket *) RelinquishMagickMemory(map);
if (image->storage_class == PseudoClass)
{
/*
Equalize colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & SyncChannels) != 0)
{
if (white.red != black.red)
{
image->colormap[i].red=equalize_map[
ScaleQuantumToMap(image->colormap[i].red)].red;
image->colormap[i].green=equalize_map[
ScaleQuantumToMap(image->colormap[i].green)].red;
image->colormap[i].blue=equalize_map[
ScaleQuantumToMap(image->colormap[i].blue)].red;
image->colormap[i].opacity=equalize_map[
ScaleQuantumToMap(image->colormap[i].opacity)].red;
}
continue;
}
if (((channel & RedChannel) != 0) && (white.red != black.red))
image->colormap[i].red=equalize_map[
ScaleQuantumToMap(image->colormap[i].red)].red;
if (((channel & GreenChannel) != 0) && (white.green != black.green))
image->colormap[i].green=equalize_map[
ScaleQuantumToMap(image->colormap[i].green)].green;
if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
image->colormap[i].blue=equalize_map[
ScaleQuantumToMap(image->colormap[i].blue)].blue;
if (((channel & OpacityChannel) != 0) &&
(white.opacity != black.opacity))
image->colormap[i].opacity=equalize_map[
ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
}
}
/*
Equalize image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & SyncChannels) != 0)
{
if (white.red != black.red)
{
SetPixelRed(q,equalize_map[
ScaleQuantumToMap(GetPixelRed(q))].red);
SetPixelGreen(q,equalize_map[
ScaleQuantumToMap(GetPixelGreen(q))].red);
SetPixelBlue(q,equalize_map[
ScaleQuantumToMap(GetPixelBlue(q))].red);
SetPixelOpacity(q,equalize_map[
ScaleQuantumToMap(GetPixelOpacity(q))].red);
if (image->colorspace == CMYKColorspace)
SetPixelIndex(indexes+x,equalize_map[
ScaleQuantumToMap(GetPixelIndex(indexes+x))].red);
}
q++;
continue;
}
if (((channel & RedChannel) != 0) && (white.red != black.red))
SetPixelRed(q,equalize_map[
ScaleQuantumToMap(GetPixelRed(q))].red);
if (((channel & GreenChannel) != 0) && (white.green != black.green))
SetPixelGreen(q,equalize_map[
ScaleQuantumToMap(GetPixelGreen(q))].green);
if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
SetPixelBlue(q,equalize_map[
ScaleQuantumToMap(GetPixelBlue(q))].blue);
if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
SetPixelOpacity(q,equalize_map[
ScaleQuantumToMap(GetPixelOpacity(q))].opacity);
if ((((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace)) &&
(white.index != black.index))
SetPixelIndex(indexes+x,equalize_map[
ScaleQuantumToMap(GetPixelIndex(indexes+x))].index);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(equalize_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const char *level)
% MagickBooleanType GammaImageChannel(Image *image,
% const ChannelType channel,const double gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o level: the image gamma as a string (e.g. 1.6,1.2,1.0).
%
% o gamma: the image gamma.
%
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
MagickExport MagickBooleanType GammaImage(Image *image,const char *level)
{
  double
    blue_gamma,
    green_gamma,
    red_gamma;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags,
    status;

  /*
    Parse the gamma expression (e.g. "1.6,1.2,1.0") into per-channel values;
    unspecified channels default to the red (first) value.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (level == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(level,&geometry_info);
  red_gamma=geometry_info.rho;
  green_gamma=((flags & SigmaValue) != 0) ? geometry_info.sigma : red_gamma;
  blue_gamma=((flags & XiValue) != 0) ? geometry_info.xi : red_gamma;
  if ((red_gamma == 1.0) && (green_gamma == 1.0) && (blue_gamma == 1.0))
    return(MagickTrue);  /* identity correction: nothing to do */
  if ((red_gamma == green_gamma) && (green_gamma == blue_gamma))
    status=GammaImageChannel(image,(ChannelType) (RedChannel | GreenChannel |
      BlueChannel),red_gamma);
  else
    {
      /*
        Distinct per-channel gammas: correct each channel in its own pass and
        accumulate the status bits.
      */
      status=GammaImageChannel(image,RedChannel,red_gamma);
      status&=GammaImageChannel(image,GreenChannel,green_gamma);
      status&=GammaImageChannel(image,BlueChannel,blue_gamma);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType GammaImageChannel(Image *image,
  const ChannelType channel,const double gamma)
{
#define GammaCorrectImageTag  "GammaCorrect/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception=(&image->exception);
  /*
    A gamma of 1.0 is the identity transform; nothing to do.
  */
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /*
    Build the lookup table map[i]=MaxMap*(i/MaxMap)^(1/gamma).  When gamma is
    0.0 the table is left all zeros, which forces the channel to black (this
    is the documented way to suppress a channel's influence).
  */
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
        MagickRealType) (MaxMap*pow((double) i/MaxMap,1.0/gamma))));
  if (image->storage_class == PseudoClass)
    {
      /*
        Gamma-correct colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
        /*
          Integer quanta: push each selected channel through the lookup
          table.
        */
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=gamma_map[ScaleQuantumToMap(
            image->colormap[i].red)];
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=gamma_map[ScaleQuantumToMap(
            image->colormap[i].green)];
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=gamma_map[ScaleQuantumToMap(
            image->colormap[i].blue)];
        if ((channel & OpacityChannel) != 0)
          {
            /*
              With a matte channel, correct alpha (QuantumRange-opacity)
              rather than opacity so transparency keeps its meaning.
            */
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=gamma_map[ScaleQuantumToMap(
                image->colormap[i].opacity)];
            else
              image->colormap[i].opacity=QuantumRange-gamma_map[
                ScaleQuantumToMap((Quantum) (QuantumRange-
                image->colormap[i].opacity))];
          }
#else
        /*
          HDRI quanta: compute the correction directly in floating point; the
          table cannot represent out-of-range values.
        */
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].red,1.0/gamma);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].green,1.0/gamma);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].blue,1.0/gamma);
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=QuantumRange*gamma_pow(QuantumScale*
                image->colormap[i].opacity,1.0/gamma);
            else
              image->colormap[i].opacity=QuantumRange-QuantumRange*gamma_pow(
                QuantumScale*(QuantumRange-image->colormap[i].opacity),1.0/
                gamma);
          }
#endif
      }
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* another row already failed: skip the remaining work */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
      /*
        SyncChannels: apply the same correction to red, green and blue.
      */
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if ((channel & OpacityChannel) != 0)
            {
              /* matte images correct alpha instead of opacity */
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,gamma_map[ScaleQuantumToMap(
                  GetPixelOpacity(q))]);
              else
                SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum)
                  GetPixelAlpha(q))]);
            }
        }
#else
      /*
        HDRI path: correct in floating point, bypassing the lookup table.
      */
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
            1.0/gamma));
          SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*GetPixelGreen(q),
            1.0/gamma));
          SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
            1.0/gamma));
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
              1.0/gamma));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*
              GetPixelGreen(q),1.0/gamma));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
              1.0/gamma));
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelOpacity(q),1.0/gamma));
              else
                SetPixelAlpha(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelAlpha(q),1.0/gamma));
            }
        }
#endif
      q++;
    }
    /*
      The CMYK index (black) channel is corrected in a separate pass.
    */
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /*
    Record the cumulative gamma applied to the image.
  */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the colors in the reference image to gray.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o method: the pixel intensity method.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Colormapped images are expanded to DirectClass first so pixels can be
    rewritten in place.
  */
  if (image->storage_class == PseudoClass)
    {
      if (SyncImage(image) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        return(MagickFalse);
    }
  /*
    Grayscale image.
  */
  /* call opencl version */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateGrayscaleImage(image,method,&image->exception) != MagickFalse)
    {
      /* accelerated path succeeded: just record the new image state */
      image->intensity=method;
      image->type=GrayscaleType;
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace));
      return(SetImageColorspace(image,GRAYColorspace));
    }
#endif
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        intensity,
        red;

      red=(MagickRealType) q->red;
      green=(MagickRealType) q->green;
      blue=(MagickRealType) q->blue;
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          /* arithmetic mean of the three channels */
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* HSB brightness: the maximum channel */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* HSL lightness: midpoint of min and max channels */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* mean of squared channels, normalized back to quantum range */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/(3.0*QuantumRange));
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /*
            Rec.601 luma is defined on gamma-encoded values; encode first if
            the image is in linear RGB.
          */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /*
            Luminance is defined on linear values; decode first if the image
            is gamma-encoded sRGB.
          */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:  /* Rec.709 luma doubles as the default method */
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* root-mean-square of the three channels */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(q,ClampToQuantum(intensity));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,GrayscaleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  /*
    Luminance methods produce linear gray; luma methods produce encoded gray.
  */
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(SetImageColorspace(image,LinearGRAYColorspace));
  return(SetImageColorspace(image,GRAYColorspace));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image)
% MagickBooleanType HaldClutImageChannel(Image *image,
% const ChannelType channel,Image *hald_image)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o channel: the channel.
%
*/
/*
  Convenience wrapper: apply the Hald color lookup table to the default
  channels of the image.
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image)
{
  MagickBooleanType
    status;

  status=HaldClutImageChannel(image,DefaultChannels,hald_image);
  return(status);
}
MagickExport MagickBooleanType HaldClutImageChannel(Image *image,
  const ChannelType channel,const Image *hald_image)
{
#define HaldClutImageTag  "Clut/Image"

  typedef struct _HaldInfo
  {
    MagickRealType
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Hald clut image.  Derive the CLUT's cube geometry from its dimensions: a
    level-n Hald image encodes an (n*n)^3 color cube on a 2-D canvas.
  */
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetMagickPixelPacket(hald_image,&zero);
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  hald_view=AcquireAuthenticCacheView(hald_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,hald_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      offset;

    HaldInfo
      point;

    MagickPixelPacket
      pixel,
      pixel1,
      pixel2,
      pixel3,
      pixel4;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      The CMYK index channel updated below belongs to the target image, so
      fetch the index queue from image_view (not the read-only CLUT view).
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    pixel1=zero;
    pixel2=zero;
    pixel3=zero;
    pixel4=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Map the pixel's RGB into cube coordinates, then trilinearly
        interpolate between the 8 surrounding CLUT entries: two bilinear
        samples per z-slice, blended along y, then blended along z.
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(q);
      offset=(double) (point.x+level*floor(point.y)+cube_size*floor(point.z));
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      status=InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/
        width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,point.y,&pixel3);
      offset+=cube_size;  /* advance to the next z-slice of the cube */
      status=InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/
        width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,point.y,&pixel4);
      MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4,
        pixel4.opacity,point.z,&pixel);
      /*
        Store the interpolated color into each requested channel.
      */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(pixel.blue));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(pixel.index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() and LevelizeImageChannel(), below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const char *levels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o levels: Specify the levels where the black and white points have the
% range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2).
% A '!' flag inverts the re-mapping.
%
*/
MagickExport MagickBooleanType LevelImage(Image *image,const char *levels)
{
  double
    black_point,
    gamma,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels.  The geometry is "black{,white}{%}{,gamma}"; a missing
    white point defaults to QuantumRange (or its mirror of the black point),
    and a missing gamma defaults to 1.0 (purely linear).
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) QuantumRange;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  gamma=1.0;
  if ((flags & XiValue) != 0)
    gamma=geometry_info.xi;
  if ((flags & PercentValue) != 0)
    {
      /*
        Percent black/white points are fractions of the quantum range, not
        pixel counts (pixel-count scaling is a contrast-stretch notion).
      */
      black_point*=(double) QuantumRange/100.0;
      white_point*=(double) QuantumRange/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) QuantumRange-black_point;
  /*
    A '!' flag (AspectValue) selects the reverse mapping (levelize).
  */
  if ((flags & AspectValue ) == 0)
    status=LevelImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  else
    status=LevelizeImage(image,black_point,white_point,gamma);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() applies the normal level operation to the image, spreading
% out the values between the black and white points over the entire range of
% values. Gamma correction is also applied after the values have been mapped.
%
% It is typically used to improve image contrast, or to provide a controlled
% linear threshold for the image. If the black and white points are set to
% the minimum and maximum values found in the image, the image can be
% normalized; or, by swapping black and white values, the image is negated.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma)
% MagickBooleanType LevelImageChannel(Image *image,
% const ChannelType channel,const double black_point,
% const double white_point,const double gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: The level which is to be mapped to zero (black)
%
% o white_point: The level which is to be mapped to QuantumRange (white)
%
% o gamma: adjust gamma by this factor before mapping values.
% use 1.0 for purely linear stretching of image color values
%
*/
/*
  Map a single channel value through the level transfer function: normalize
  the value into the [black_point,white_point] span, apply inverse-gamma,
  then stretch back over the full quantum range.
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const MagickRealType pixel)
{
  double
    normalized;

  normalized=PerceptibleReciprocal(white_point-black_point)*
    ((double) pixel-black_point);
  return(QuantumRange*gamma_pow(normalized,1.0/gamma));
}
MagickExport MagickBooleanType LevelImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].red));
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=(Quantum) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,(MagickRealType)
          image->colormap[i].green));
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].blue));
      /*
        Opacity is leveled on the inverted (alpha) scale, then inverted back.
      */
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-(Quantum)
          ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) (QuantumRange-image->colormap[i].opacity))));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelBlue(q))));
      /*
        Alpha is only leveled when the image actually has a matte channel.
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelAlpha(q))));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* clamp any out-of-range values the level mapping may have produced */
  (void) ClampImage(image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImageChannel() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImageChannel() can be called by using a +level command line
% API option, or using a '!' on a -level or LevelImage() geometry string.
%
% It can be used for example de-contrast a greyscale image to the exact
% levels specified. Or by using specific levels for each channel of an image
% you can convert a gray-scale image to any linear color gradient, according
% to those levels.
%
% The format of the LevelizeImageChannel method is:
%
% MagickBooleanType LevelizeImageChannel(Image *image,
% const ChannelType channel,const char *levels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
*/
/*
  Convenience wrapper: apply the reverse level operation to the default
  channels of the image.
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma)
{
  return(LevelizeImageChannel(image,DefaultChannels,black_point,white_point,
    gamma));
}
MagickExport MagickBooleanType LevelizeImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelizeImageTag  "Levelize/Image"
/*
  Reverse level transfer: apply gamma to the normalized value, then compress
  it into the [black_point,white_point] span.
*/
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=LevelizeValue(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=LevelizeValue(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=LevelizeValue(image->colormap[i].blue);
      /*
        Opacity is levelized on the inverted (alpha) scale, then inverted
        back.
      */
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-LevelizeValue(
          QuantumRange-image->colormap[i].opacity));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,LevelizeValue(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,LevelizeValue(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,LevelizeValue(GetPixelBlue(q)));
      /*
        Alpha is only levelized when the image actually has a matte channel.
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelAlpha(q,LevelizeValue(GetPixelAlpha(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,LevelizeValue(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColor() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% basis, as per LevelImage(). The given colors allow you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% The format of the LevelColorsImageChannel method is:
%
% MagickBooleanType LevelColorsImage(Image *image,
% const MagickPixelPacket *black_color,
% const MagickPixelPacket *white_color,const MagickBooleanType invert)
% MagickBooleanType LevelColorsImageChannel(Image *image,
% const ChannelType channel,const MagickPixelPacket *black_color,
% const MagickPixelPacket *white_color,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_color: The color to map black to/from
%
% o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
*/
/*
  LevelColorsImage() levels all default channels of the image between the
  corresponding components of the given black and white colors.  This is a
  convenience wrapper around LevelColorsImageChannel().
*/
MagickExport MagickBooleanType LevelColorsImage(Image *image,
  const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
  const MagickBooleanType invert)
{
  return(LevelColorsImageChannel(image,DefaultChannels,black_color,
    white_color,invert));
}
/*
  LevelColorsImageChannel() levels (or, when 'invert' is true, levelizes)
  each requested channel between the corresponding components of the given
  black and white reference colors, with a gamma of 1.0.  Returns MagickTrue
  only if every per-channel operation succeeded.
*/
MagickExport MagickBooleanType LevelColorsImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *black_color,
  const MagickPixelPacket *white_color,const MagickBooleanType invert)
{
  MagickStatusType
    status;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A grayscale image may stay grayscale only when both reference colors are
    themselves gray; otherwise promote it to sRGB so each channel can be
    leveled independently.  (Bug fix: the original tested != MagickFalse on
    the reference colors, converting exactly when conversion was NOT needed
    and skipping it when it was.)
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace);
  status=MagickTrue;
  if (invert == MagickFalse)
    {
      /* Level: spread [black,white] out to the full quantum range. */
      if ((channel & RedChannel) != 0)
        status&=LevelImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status&=LevelImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status&=LevelImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        status&=LevelImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status&=LevelImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  else
    {
      /* Levelize: compress the full quantum range into [black,white]. */
      if ((channel & RedChannel) != 0)
        status&=LevelizeImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status&=LevelizeImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status&=LevelizeImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        status&=LevelizeImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status&=LevelizeImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
*/
/*
  LinearStretchImage() builds an intensity histogram, finds the levels at
  which the cumulative pixel counts (from the dark and from the light end)
  reach black_point and white_point respectively, and then levels the image
  between those two quantum levels.  Note black_point/white_point are
  pixel-count thresholds, not intensity levels.  Returns MagickFalse on
  allocation failure or if the leveling step fails.
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
const double black_point,const double white_point)
{
#define LinearStretchImageTag "LinearStretch/Image"
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickRealType
*histogram,
intensity;
ssize_t
black,
white,
y;
/*
Allocate histogram and linear map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
exception=(&image->exception);
histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*histogram));
if (histogram == (MagickRealType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Form histogram.
*/
(void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
/* A NULL row means the pixel cache failed; stop with a partial histogram. */
if (p == (const PixelPacket *) NULL)
break;
for (x=(ssize_t) image->columns-1; x >= 0; x--)
{
/* Bucket each pixel by its (clamped) intensity, scaled to the map range. */
histogram[ScaleQuantumToMap(ClampToQuantum(GetPixelIntensity(image,p)))]++;
p++;
}
}
/*
Find the histogram boundaries by locating the black and white point levels.
*/
intensity=0.0;
for (black=0; black < (ssize_t) MaxMap; black++)
{
intensity+=histogram[black];
if (intensity >= black_point)
break;
}
intensity=0.0;
/* Walk down from the bright end until white_point pixels are covered. */
for (white=(ssize_t) MaxMap; white != 0; white--)
{
intensity+=histogram[white];
if (intensity >= white_point)
break;
}
histogram=(MagickRealType *) RelinquishMagickMemory(histogram);
/* Stretch the surviving [black,white] range to the full quantum range. */
status=LevelImageChannel(image,DefaultChannels,(double)
ScaleMapToQuantum(black),(double) ScaleMapToQuantum(white),1.0);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chroma, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and
% hue.
%
*/
/*
  Modulate a pixel in the HCL colorspace: scale chroma and luma by the given
  percentages (100 = unchanged) and rotate the hue, then convert back to RGB
  in place.
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    chroma,
    hue,
    luma;

  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}
/*
  Modulate a pixel in the HCLp colorspace: scale chroma and luma by the given
  percentages (100 = unchanged) and rotate the hue, then convert back to RGB
  in place.
*/
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    chroma,
    hue,
    luma;

  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}
/*
  Modulate a pixel in the HSB colorspace: scale saturation and brightness by
  the given percentages (100 = unchanged) and rotate the hue, then convert
  back to RGB in place.
*/
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  saturation*=0.01*percent_saturation;
  brightness*=0.01*percent_brightness;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
/*
  Modulate a pixel in the HSI colorspace: scale saturation and intensity by
  the given percentages (100 = unchanged) and rotate the hue, then convert
  back to RGB in place.
*/
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    intensity,
    saturation;

  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  saturation*=0.01*percent_saturation;
  intensity*=0.01*percent_intensity;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}
/*
  Modulate a pixel in the HSL colorspace: scale saturation and lightness by
  the given percentages (100 = unchanged) and rotate the hue, then convert
  back to RGB in place.
*/
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    lightness,
    saturation;

  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  saturation*=0.01*percent_saturation;
  lightness*=0.01*percent_lightness;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}
/*
  Modulate a pixel in the HSV colorspace: scale saturation and value by the
  given percentages (100 = unchanged) and rotate the hue, then convert back
  to RGB in place.
*/
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    saturation,
    value;

  ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
  saturation*=0.01*percent_saturation;
  value*=0.01*percent_value;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}
/*
  Modulate a pixel in the HWB colorspace: scale whiteness and blackness by
  the given percentages (100 = unchanged) and rotate the hue, then convert
  back to RGB in place.
*/
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    blackness,
    hue,
    whiteness;

  ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness);
  whiteness*=0.01*percent_whiteness;
  blackness*=0.01*percent_blackness;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue);
}
/*
  Modulate a pixel in the LCHab colorspace: scale luma and chroma by the
  given percentages (100 = unchanged) and rotate the hue, then convert back
  to RGB in place.
*/
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    chroma,
    hue,
    luma;

  ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  luma*=0.01*percent_luma;
  chroma*=0.01*percent_chroma;
  ConvertLCHabToRGB(luma,chroma,hue,red,green,blue);
}
/*
  Modulate a pixel in the LCHuv colorspace: scale luma and chroma by the
  given percentages (100 = unchanged) and rotate the hue, then convert back
  to RGB in place.
*/
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    chroma,
    hue,
    luma;

  ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  luma*=0.01*percent_luma;
  chroma*=0.01*percent_chroma;
  ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue);
}
/*
  ModulateImage() adjusts brightness, saturation, and hue, parsed from the
  "brightness[,saturation[,hue]]" string (100 = unchanged).  The working
  colorspace defaults to HSL and may be overridden via the
  "modulate:colorspace" artifact.

  Fixes relative to the previous revision: the DirectClass pixel loop's
  switch now matches the colormap switch in this same function — it gained
  the previously-missing HSIColorspace case (HSI used to silently fall
  through to the HSL default for non-colormapped images), and LCHColorspace
  is grouped with LCHabColorspace (as in the colormap path) rather than with
  LCHuvColorspace.
*/
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate)
{
#define ModulateImageTag "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  const char
    *artifact;

  double
    percent_brightness,
    percent_hue,
    percent_saturation;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize modulate table.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  flags=ParseGeometry(modulate,&geometry_info);
  percent_brightness=geometry_info.rho;
  percent_saturation=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    percent_saturation=100.0;  /* omitted values mean "no change" */
  percent_hue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    percent_hue=100.0;
  colorspace=UndefinedColorspace;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
      MagickFalse,artifact);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      Quantum
        blue,
        green,
        red;

      /*
        Modulate image colormap.
      */
      red=image->colormap[i].red;
      green=image->colormap[i].green;
      blue=image->colormap[i].blue;
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        case LCHColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      image->colormap[i].red=red;
      image->colormap[i].green=green;
      image->colormap[i].blue=blue;
    }
  /*
    Modulate image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the accelerated OpenCL path first; fall back to the CPU loop. */
  status=AccelerateModulateImage(image,percent_brightness,percent_hue,
    percent_saturation,colorspace,&image->exception);
  if (status != MagickFalse)
    return(status);
#endif
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        blue,
        green,
        red;

      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        case LCHColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImageChannel method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale)
% MagickBooleanType NegateImageChannel(Image *image,
% const ChannelType channel,const MagickBooleanType grayscale)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
*/
/*
  NegateImage() negates all default channels of the image; when 'grayscale'
  is true only gray pixels are negated.  Convenience wrapper around
  NegateImageChannel().
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale)
{
  return(NegateImageChannel(image,DefaultChannels,grayscale));
}
/*
  NegateImageChannel() replaces each selected channel value v with
  QuantumRange-v.  When 'grayscale' is true, only pixels whose red, green,
  and blue components are all equal are negated.  Returns MagickFalse if the
  pixel cache could not be accessed or the caller aborted via the progress
  monitor.

  Fix relative to the previous revision: the grayscale branch used to end
  with return(MagickTrue), discarding 'status' and silently swallowing
  pixel-cache failures; it now propagates 'status' like the non-grayscale
  path.
*/
MagickExport MagickBooleanType NegateImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType grayscale)
{
#define NegateImageTag "Negate/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Negate colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if (grayscale != MagickFalse)
          if ((image->colormap[i].red != image->colormap[i].green) ||
              (image->colormap[i].green != image->colormap[i].blue))
            continue;  /* grayscale mode: leave non-gray entries untouched */
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  if (grayscale != MagickFalse)
    {
      /*
        Negate only pixels whose R, G, and B components are identical.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register IndexPacket
          *magick_restrict indexes;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if ((GetPixelRed(q) != GetPixelGreen(q)) ||
              (GetPixelGreen(q) != GetPixelBlue(q)))
            {
              q++;
              continue;  /* not a gray pixel */
            }
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,QuantumRange-GetPixelRed(q));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,QuantumRange-GetPixelGreen(q));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange-GetPixelBlue(q));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x));
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,NegateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      return(status);  /* was return(MagickTrue): propagate failures */
    }
  /*
    Negate image.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (channel == DefaultChannels)
      /* Fast path: negate R, G, and B without per-channel tests. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x));
        SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x));
        SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x));
      }
    else
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x));
        if ((channel & GreenChannel) != 0)
          SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x));
        if ((channel & BlueChannel) != 0)
          SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x));
        if ((channel & OpacityChannel) != 0)
          SetPixelOpacity(q+x,QuantumRange-GetPixelOpacity(q+x));
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x));
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 2 percent of all pixel to black and the brightest
% 1 percent to white.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image)
% MagickBooleanType NormalizeImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
/*
  NormalizeImage() enhances contrast on all default channels; convenience
  wrapper around NormalizeImageChannel().
*/
MagickExport MagickBooleanType NormalizeImage(Image *image)
{
  return(NormalizeImageChannel(image,DefaultChannels));
}
/*
  NormalizeImageChannel() contrast-stretches the selected channels, clipping
  the darkest 0.15% and the brightest 0.05% of the pixels (expressed below
  as pixel-count thresholds for ContrastStretchImageChannel()).
*/
MagickExport MagickBooleanType NormalizeImageChannel(Image *image,
  const ChannelType channel)
{
  const double
    pixels=(double) image->columns*image->rows;

  return(ContrastStretchImageChannel(image,channel,pixels*0.0015,
    pixels*0.9995));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const char *levels)
% MagickBooleanType SigmoidalContrastImageChannel(Image *image,
% const ChannelType channel,const MagickBooleanType sharpen,
% const double contrast,const double midpoint)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
*/
/*
ImageMagick 7 has a version of this function which does not use LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
The first version, based on the hyperbolic tangent tanh, when combined with
the scaling step, is an exact arithmetic clone of the the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
/* tanh form; with the rescaling in ScaledSigmoidal below it is an exact
   arithmetic clone of the logistic form (see the identity quoted above). */
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
/* logistic fallback when the toolchain lacks atanh(). */
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
Scaled sigmoidal function:
( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
zero. This is fixed below by exiting immediately when contrast is small,
leaving the image (or colormap) unmodified. This appears to be safe because
the series expansion of the logistic sigmoidal function around x=b is
1/2-a*(b-x)/4+...
so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
(Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
(Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
/*
  InverseScaledSigmoidal() is the right inverse of ScaledSigmoidal() (see the
  long comment above): it undoes the [0,1] endpoint scaling and then applies
  atanh (or the logistic inverse), clamping the intermediate value so the
  inverse function's argument stays strictly inside its domain.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
const double x)
{
const double sig0=Sigmoidal(a,b,0.0);
const double sig1=Sigmoidal(a,b,1.0);
/* Undo the endpoint rescaling: map x back into the raw sigmoidal range. */
const double argument=(sig1-sig0)*x+sig0;
const double clamped=
(
#if defined(MAGICKCORE_HAVE_ATANH)
argument < -1+MagickEpsilon
?
-1+MagickEpsilon
:
( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
);
return(b+(2.0/a)*atanh(clamped));
#else
argument < MagickEpsilon
?
MagickEpsilon
:
( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
);
return(b-log(1.0/clamped-1.0)/a);
#endif
}
/*
  SigmoidalContrastImage() parses a "contrast[,midpoint[%]]" geometry string
  and forwards to SigmoidalContrastImageChannel() with the default channel
  mask.
*/
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const char *levels)
{
  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  flags=ParseGeometry(levels,&geometry_info);
  if ((flags & SigmaValue) == 0)
    geometry_info.sigma=1.0*QuantumRange/2.0;  /* midpoint defaults to mid-gray */
  if ((flags & PercentValue) != 0)
    geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0;
  return(SigmoidalContrastImageChannel(image,DefaultChannels,sharpen,
    geometry_info.rho,geometry_info.sigma));
}
/*
  SigmoidalContrastImageChannel() builds a lookup table from the (inverse)
  scaled sigmoidal function and applies it to the selected channels of the
  colormap and pixels.  'sharpen' selects the forward (contrast-increasing)
  or inverse (contrast-reducing) transfer function.
*/
MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image,
const ChannelType channel,const MagickBooleanType sharpen,
const double contrast,const double midpoint)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickRealType
*sigmoidal_map;
register ssize_t
i;
ssize_t
y;
/*
Side effect: clamps values unless contrast<MagickEpsilon, in which
case nothing is done.
*/
/* a near-zero contrast would divide by ~0 in ScaledSigmoidal (see the
   comment above that macro), so return early and leave the image as-is. */
if (contrast < MagickEpsilon)
return(MagickTrue);
/*
Allocate and initialize sigmoidal maps.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
exception=(&image->exception);
sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*sigmoidal_map));
if (sigmoidal_map == (MagickRealType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) memset(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map));
/* Precompute the transfer function once per map entry; pixels then index
   this LUT instead of evaluating tanh/atanh per pixel. */
if (sharpen != MagickFalse)
for (i=0; i <= (ssize_t) MaxMap; i++)
sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType)
(MaxMap*ScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/
MaxMap)));
else
for (i=0; i <= (ssize_t) MaxMap; i++)
sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (
MaxMap*InverseScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/
MaxMap)));
/*
Sigmoidal-contrast enhance colormap.
*/
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & RedChannel) != 0)
image->colormap[i].red=ClampToQuantum(sigmoidal_map[
ScaleQuantumToMap(image->colormap[i].red)]);
if ((channel & GreenChannel) != 0)
image->colormap[i].green=ClampToQuantum(sigmoidal_map[
ScaleQuantumToMap(image->colormap[i].green)]);
if ((channel & BlueChannel) != 0)
image->colormap[i].blue=ClampToQuantum(sigmoidal_map[
ScaleQuantumToMap(image->colormap[i].blue)]);
if ((channel & OpacityChannel) != 0)
image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[
ScaleQuantumToMap(image->colormap[i].opacity)]);
}
/*
Sigmoidal-contrast enhance image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
GetPixelRed(q))]));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
GetPixelGreen(q))]));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
GetPixelBlue(q))]));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
GetPixelOpacity(q))]));
/* index channel carries black ink for CMYK images only */
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
GetPixelIndex(indexes+x))]));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map);
return(status);
}
|
NQueen-Paralelo.c | /*Proyecto Final de Algoritmos
Sebastian Gonzalo Vives Faus - A01025211
Sergio Hernandez Castillo - A01025210
Descripción: Problema de las N Reinas resuelto paralelizado
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h> // abs(), atoi()
#include <stdbool.h>
//Global state shared by every thread in the parallel search.
int n = 0;              //Board size (N), set from argv[1] in main().
int soluciones = 0;     //Total solutions found; updated under "#pragma omp atomic".
bool imprimir = false;  //Whether each solution board is printed (argv[3] == 1).
//Metodo para imprimir
//Print the current board: 'Q' marks a queen, '0' an empty square.
//arr[i] holds the column index of the queen placed in row i.
//Reads globals: n (board size) and soluciones (solution counter).
void print(int arr[]){
    printf("Solucion %d \n", soluciones);  //fixed typo: was "Soulcion"
    for(int i = 0; i < n; i++){
        for(int j = 0; j < n; j++){
            //A queen sits at (i, j) when the stored column for row i is j.
            if(arr[i] == j){
                printf("Q ");
            }//Close if
            else{
                printf("0 ");
            }//Close else
        }//Close for
        printf("\n");
    }//Close for
}//Close print()
//Metodo complementario a Reinas
//Recursive backtracking helper for Reinas().
//arr[0..row-1] holds the column of the queen already placed in each row;
//this call tries to put a queen at (row, col) on behalf of thread t_id.
void InsertarReinas(int arr[], int row, int col, int t_id){
    /*Check every previously placed queen: the new queen must not share a
    column nor a diagonal with any of them (standard chess attack rules).*/
    for(int i = 0; i < row; i++){
        //Vertical attack: same column as the queen in row i.
        if (arr[i] == col){
            //printf("V Ouch! en la hilera %d",row);
            return;
        }//Close if
        //Diagonal attack: |column distance| equals the row distance.
        if((abs(arr[i] - col) == (row-i))){
            //printf("Q Ouch! en la hilera %d",row);
            return;
        }//Close if
    }//Close for
    /*Neither attack applies, so it is safe to place the queen in this
    column of the current row.*/
    arr[row] = col;
    //If this was the last row, the filled board is a complete solution.
    if(row == n-1){
        #pragma omp atomic
        soluciones++; //Shared counter: increment must be atomic.
        //Optionally print the board, serialized so outputs don't interleave.
        if(imprimir == true){
            #pragma omp critical
            {
                /*Report which thread found the solution, then dump the
                board itself.*/
                printf("Thread %d encontro una solucion! \n",t_id);
                print(arr);
            }//Close pragma
        }//Close if
    }//Close if
    /*Otherwise keep searching: try a queen in every column of the next
    row (depth-first exploration).*/
    else{
        for(int i = 0; i<n; i++){
            InsertarReinas(arr, row+1, i,t_id);
        }//Clos for
    }//Close else
}//Close InsertarReinas()
//Metodo principal
//Entry point of the parallel search: each OpenMP thread explores the
//sub-tree of boards whose first-row queen starts in a distinct column.
void Reinas(){
    int t_id; //Thread id (private to each thread)
    int i;
    //Guided schedule: chunk size shrinks as iterations are handed out.
    #pragma omp parallel for schedule(guided,1) private(t_id)
    for (i = 0; i < n; i++){
        t_id = omp_get_thread_num();
        //Each thread works on its own board, so boards are never shared.
        int tablero[n];
        //Place the first queen at row 0, column i, then recurse downward.
        InsertarReinas(tablero, 0, i, t_id);
    }
}
//Main
//Program entry point.
//Usage: ./a.out <board size> <thread count> <0/1 print boards>
//Returns 0 on success, 1 on invalid arguments (original returned 0 on
//the error path too, which hid failures from the shell).
int main(int argc, const char* argv[]){
    if(argc < 4){
        printf("Error!: Ejecutar como ./a.out <tamaño del tablero> <numero de threads> <0/1 para imprimir tablas o no> \n");
        return 1;
    }
    //Board size; must be a positive integer.
    n = atoi(argv[1]);
    //Thread count; must be positive for omp_set_num_threads().
    int threads = atoi(argv[2]);
    if(n <= 0 || threads <= 0){
        printf("Error!: el tamaño del tablero y el numero de threads deben ser enteros positivos \n");
        return 1;
    }
    omp_set_num_threads(threads);
    //Whether each found solution board should be printed.
    int option = atoi(argv[3]);
    if(option == 1){
        imprimir = true;
    }
    //Time the parallel search with OpenMP's wall clock.
    double start = omp_get_wtime();
    Reinas();
    double finish = omp_get_wtime();
    printf("Soluciones con un tablero de %d x %d : %d con un tiempo de ejecucion de: %f \n",n,n,soluciones, finish-start);
    return 0;
}
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "dark_cuda.h"
#include "box.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define NUMCHARS 37 /* captcha alphabet size: 36 alphanumerics + 1 blank (see fill_truth_captcha) */
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; /* serializes path selection / RNG use across loader threads */
/// Read every line of `filename` into a newly allocated list of strings.
/// Aborts via file_error() if the file cannot be opened.
list *get_paths(char *filename)
{
    FILE *fp = fopen(filename, "r");
    if (!fp) file_error(filename);
    list *lines = make_list();
    char *line;
    while ((line = fgetl(fp)) != NULL) {
        list_insert(lines, line);
    }
    fclose(fp);
    return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = random_gen()%m;
indexes[i] = index;
random_paths[i] = paths[index];
if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
/// Pick n paths that advance sequentially through the m available paths,
/// emulating mini_batch parallel "time lines" (used for sequence/track
/// training). Each time line starts at a random index and steps forward
/// by a random speed in [1, augment_speed] on every visit.
/// Thread-safe: all index bookkeeping happens under the global mutex.
/// Returns NULL on allocation failure.
char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed)
{
    int speed = rand_int(1, augment_speed);
    if (speed < 1) speed = 1;
    if (mini_batch < 1) mini_batch = 1;     // guard: original divided by zero in i % mini_batch
    char** sequentia_paths = (char**)calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    unsigned int *start_time_indexes = (unsigned int *)calloc(mini_batch, sizeof(unsigned int));
    if (!sequentia_paths || !start_time_indexes) {
        // Allocation failure: release whatever succeeded and bail out.
        pthread_mutex_unlock(&mutex);
        free(sequentia_paths);
        free(start_time_indexes);
        return NULL;
    }
    // Random starting position for every time line.
    for (i = 0; i < mini_batch; ++i) {
        start_time_indexes[i] = random_gen() % m;
    }
    for (i = 0; i < n; ++i) {
        do {
            // Round-robin over the time lines; each walks forward by `speed`.
            int time_line_index = i % mini_batch;
            unsigned int index = start_time_indexes[time_line_index] % m;
            start_time_indexes[time_line_index] += speed;
            sequentia_paths[i] = paths[index];
            // Suspiciously short paths usually mean a corrupt list file.
            if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", sequentia_paths[i]);
        } while (strlen(sequentia_paths[i]) == 0);   // retry empty entries
    }
    free(start_time_indexes);
    pthread_mutex_unlock(&mutex);
    return sequentia_paths;
}
/// Pick n paths uniformly at random (with replacement) out of m.
/// Holds the global mutex so concurrent loaders get a consistent RNG stream.
char **get_random_paths(char **paths, int n, int m)
{
    char **chosen = (char**)calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    for (i = 0; i < n; ++i) {
        do {
            int pick = random_gen() % m;
            chosen[i] = paths[pick];
            // Suspiciously short paths usually mean a corrupt list file.
            if (strlen(chosen[i]) <= 4) printf(" Very small path to the image: %s \n", chosen[i]);
        } while (strlen(chosen[i]) == 0);   // retry empty entries
    }
    pthread_mutex_unlock(&mutex);
    return chosen;
}
/// Return a new array of n paths where `find` has been replaced by
/// `replace` in every entry; each result string is freshly allocated.
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **out = (char**)calloc(n, sizeof(char*));
    int i;
    for (i = 0; i < n; ++i) {
        char buffer[4096];
        find_replace(paths[i], find, replace, buffer);
        out[i] = copy_string(buffer);
    }
    return out;
}
/// Load n images at w x h, convert each to grayscale, and pack them
/// into a matrix (one flattened image per row).
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.cols = 0;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    for (int i = 0; i < n; ++i) {
        image color = load_image(paths[i], w, h, 3);
        image gray = grayscale_image(color);
        free_image(color);                  // keep only the grayscale copy
        X.vals[i] = gray.data;              // matrix takes ownership of the pixels
        X.cols = gray.h * gray.w * gray.c;
    }
    return X;
}
/// Load n color images resized to w x h into a matrix, one flattened
/// image per row.
matrix load_image_paths(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.cols = 0;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    for (int i = 0; i < n; ++i) {
        image im = load_image_color(paths[i], w, h);
        X.vals[i] = im.data;                // ownership moves to the matrix
        X.cols = im.h * im.w * im.c;
    }
    return X;
}
/// Load n images and apply random crop / rotation / flip / HSV
/// augmentation; results are packed into a matrix, one flattened
/// augmented image per row.
matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    matrix X;
    X.rows = n;
    X.cols = 0;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    for (int i = 0; i < n; ++i) {
        image orig = load_image_color(paths[i], 0, 0);   // native resolution
        image crop = random_augment_image(orig, angle, aspect, min, max, size);
        int flip = use_flip ? random_gen() % 2 : 0;      // coin flip per image
        if (flip) flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);
        free_image(orig);
        X.vals[i] = crop.data;              // matrix takes ownership
        X.cols = crop.h * crop.w * crop.c;
    }
    return X;
}
extern int check_mistakes; /* defined elsewhere; when nonzero the loader pauses (getchar) on bad labels */
/// Parse a YOLO label file: one "id x y w h" record per line.
/// On success *n is the record count and the returned array holds the
/// boxes with derived left/right/top/bottom edges. If the file is
/// missing, the path is appended to bad.list, *n is set to 0 and an
/// empty allocation is returned. Caller owns the returned buffer.
box_label *read_boxes(char *filename, int *n)
{
    box_label* boxes = (box_label*)calloc(1, sizeof(box_label));
    FILE *file = fopen(filename, "r");
    if (!file) {
        printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename);
        //file_error(filename);
        // Record the missing file; skip logging if bad.list is unwritable
        // (original dereferenced fw without checking fopen's result).
        FILE* fw = fopen("bad.list", "a");
        if (fw) {
            fwrite(filename, sizeof(char), strlen(filename), fw);
            char *new_line = "\n";
            fwrite(new_line, sizeof(char), strlen(new_line), fw);
            fclose(fw);
        }
        if (check_mistakes) getchar();
        *n = 0;
        return boxes;
    }
    float x, y, h, w;
    int id;
    int count = 0;
    while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
        // Grow safely: keep the old buffer if realloc fails (original
        // overwrote `boxes`, leaking it and crashing on the next write).
        box_label *grown = (box_label*)realloc(boxes, (count + 1) * sizeof(box_label));
        if (!grown) break;               // out of memory: keep what we parsed
        boxes = grown;
        boxes[count].id = id;
        boxes[count].x = x;
        boxes[count].y = y;
        boxes[count].h = h;
        boxes[count].w = w;
        // Convert center/size to edge coordinates for later clipping.
        boxes[count].left = x - w/2;
        boxes[count].right = x + w/2;
        boxes[count].top = y - h/2;
        boxes[count].bottom = y + h/2;
        ++count;
    }
    fclose(file);
    *n = count;
    return boxes;
}
/// Shuffle the n labels in place by swapping each slot with a uniformly
/// random one (simple swap shuffle, matching the original behavior).
void randomize_boxes(box_label *b, int n)
{
    for (int i = 0; i < n; ++i) {
        int j = random_gen() % n;
        box_label tmp = b[i];
        b[i] = b[j];
        b[j] = tmp;
    }
}
/*
** Map box coordinates from the original image frame into the cropped /
** scaled / possibly mirrored frame used for training.
** dx,dy: normalized crop offsets; sx,sy: scale factors; flip: nonzero
** if the image was mirrored horizontally.
** Degenerate or fully-out-of-frame boxes get all fields set to 999999
** so downstream code (e.g. fill_truth_detection) can recognize and drop
** them. Exact order of operations (edges first, then flip, then clamp,
** then re-derive center/size) is behavior-critical.
*/
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        /* A (0,0) center means "no box" in this label format. */
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        /* Box lies entirely outside the [0,1] frame: mark as invalid. */
        if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 ||
            (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1)
        {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        /* Transform the edges into the crop's coordinate system. */
        boxes[i].left   = boxes[i].left  * sx - dx;
        boxes[i].right  = boxes[i].right * sx - dx;
        boxes[i].top    = boxes[i].top   * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;
        /* Horizontal mirror: swap and reflect left/right. */
        if(flip){
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }
        /* Clip every edge to the visible [0,1] frame. */
        boxes[i].left =  constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top =   constrain(0, 1, boxes[i].top);
        boxes[i].bottom =   constrain(0, 1, boxes[i].bottom);
        /* Re-derive center/size from the clipped edges. */
        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);
        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}
/// Fill `truth` with up to 30 ground-truth boxes in the SWAG layout:
/// (4+classes) floats per box -> [x, y, w, h, one-hot class].
/// dx/dy/sx/sy/flip describe the augmentation applied to the image.
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    int i;
    for (i = 0; i < count && i < 30; ++i) {
        float x = boxes[i].x;
        float y = boxes[i].y;
        float w = boxes[i].w;
        float h = boxes[i].h;
        int id = boxes[i].id;
        if (w < .0 || h < .0) continue;     // skip degenerate boxes
        int index = (4+classes) * i;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
        if (id < classes) truth[index+id] = 1;  // one-hot class flag
    }
    free(boxes);
}
/// Fill region-layer ground truth: the image is divided into a
/// num_boxes x num_boxes grid and each cell stores
/// [objectness, one-hot classes, x, y, w, h] = (5+classes) floats.
/// Only the first box whose center lands in a given cell is kept.
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    int i;
    for (i = 0; i < count; ++i) {
        float x = boxes[i].x;
        float y = boxes[i].y;
        float w = boxes[i].w;
        float h = boxes[i].h;
        int id = boxes[i].id;
        if (w < .001 || h < .001) continue;     // too small to train on
        // Which grid cell does the box center fall into?
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);
        // Center offsets inside the cell, in [0,1).
        x = x*num_boxes - col;
        y = y*num_boxes - row;
        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;             // cell already claimed
        truth[index++] = 1;                     // objectness flag
        if (id < classes) truth[index+id] = 1;  // one-hot class
        index += classes;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}
/*
** Fill YOLO detection ground truth for one image.
** truth receives up to num_boxes packed records of 5 floats [x,y,w,h,id];
** rejected boxes are skipped via `sub` so accepted records stay contiguous.
** dx,dy,sx,sy,flip describe the crop/flip augmentation already applied;
** net_w/net_h are the network input size used to drop sub-pixel boxes.
** Returns the smallest accepted box side in network pixels (0 if none).
**
** SECURITY NOTE(review): the sprintf+system calls below interpolate
** `labelpath` into a shell command; a malicious path in the training
** list is executed by the shell. Only use with trusted data lists.
*/
int fill_truth_detection(const char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy,
    int net_w, int net_h)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);
    int count = 0;
    int i;
    box_label *boxes = read_boxes(labelpath, &count);
    int min_w_h = 0;
    /* A box narrower/shorter than one network pixel is unusable. */
    float lowest_w = 1.F / net_w;
    float lowest_h = 1.F / net_h;
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if (count > num_boxes) count = num_boxes;
    float x, y, w, h;
    int id;
    int sub = 0;  /* rejected boxes so far; keeps truth records packed */
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        // not detect small objects
        //if ((w < 0.001F || h < 0.001F)) continue;
        // if truth (box for object) is smaller than 1x1 pix
        char buff[256];
        /* Class id out of range: report, log, and drop the box. */
        if (id >= classes) {
            printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d] \n", id, (classes-1));
            sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1));
            system(buff);
            getchar();
            ++sub;
            continue;
        }
        /* Smaller than one network pixel: silently drop. */
        if ((w < lowest_w || h < lowest_h)) {
            //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath);
            //system(buff);
            ++sub;
            continue;
        }
        /* 999999 is the "invalid" marker set by correct_boxes(). */
        if (x == 999999 || y == 999999) {
            printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1 \n");
            sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        /* Center must lie strictly inside the (0,1] frame. */
        if (x <= 0 || x > 1 || y <= 0 || y > 1) {
            printf("\n Wrong annotation: x = %f, y = %f \n", x, y);
            sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        /* Oversized boxes are clamped (not dropped) to the full frame. */
        if (w > 1) {
            printf("\n Wrong annotation: w = %f \n", w);
            sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w);
            system(buff);
            w = 1;
            if (check_mistakes) getchar();
        }
        if (h > 1) {
            printf("\n Wrong annotation: h = %f \n", h);
            sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h);
            system(buff);
            h = 1;
            if (check_mistakes) getchar();
        }
        /* Avoid exactly-zero centers (reserved as the end-of-list marker). */
        if (x == 0) x += lowest_w;
        if (y == 0) y += lowest_h;
        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;
        /* Track the smallest accepted box side in network pixels. */
        if (min_w_h == 0) min_w_h = w*net_w;
        if (min_w_h > w*net_w) min_w_h = w*net_w;
        if (min_w_h > h*net_h) min_w_h = h*net_h;
    }
    free(boxes);
    return min_w_h;
}
/// Decode and print a captcha prediction: each group of NUMCHARS scores
/// is argmax-ed into a single alphanumeric character.
void print_letters(float *pred, int n)
{
    int i;
    for (i = 0; i < n; ++i) {
        int best = max_index(pred + i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(best));
    }
    printf("\n");
}
/// Build captcha ground truth from the file name: characters after the
/// last '/' (up to the first '.') are one-hot encoded into rows of
/// NUMCHARS floats; remaining rows up to n get the blank class
/// (NUMCHARS-1) set.
void fill_truth_captcha(char *path, int n, float *truth)
{
    // Guard: original did `strrchr(path,'/') + 1` unconditionally,
    // which is UB when the path contains no '/'.
    char *slash = strrchr(path, '/');
    char *begin = slash ? slash + 1 : path;
    int i;
    size_t len = strlen(begin);   // hoisted: was re-evaluated every iteration
    for (i = 0; i < (int)len && i < n && begin[i] != '.'; ++i) {
        int index = alphanum_to_int(begin[i]);
        if (index > 35) printf("Bad %c\n", begin[i]);
        // Guard the one-hot write: an out-of-range index would corrupt
        // neighboring rows (original wrote unconditionally after the warning).
        if (index >= 0 && index < NUMCHARS)
            truth[i*NUMCHARS+index] = 1;
    }
    for (; i < n; ++i) {
        truth[i*NUMCHARS + NUMCHARS-1] = 1;   // blank filler class
    }
}
/// Load n captcha images (a random sample of m when m > 0) together
/// with their one-hot label matrix of k characters each.
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if (m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    for (int i = 0; i < n; ++i) {
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if (m) free(paths);   // only the sampled array, not the strings
    return d;
}
/// Load captcha images for an autoencoder: the target y aliases X.
/// NOTE(review): X.cols is forced to 17100 — presumably the encoder's
/// flattened input size; confirm against the network configuration.
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if (m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;            // shallow alias: the input doubles as the target
    if (m) free(paths);
    return d;
}
/// One-hot encode `truth` (length k) by checking which label strings
/// occur as substrings of `path`. Warns when the number of matches is
/// not exactly one and lists the conflicting labels.
void fill_truth(char *path, char **labels, int k, float *truth)
{
    memset(truth, 0, k*sizeof(float));
    int hits = 0;
    int i;
    for (i = 0; i < k; ++i) {
        if (strstr(path, labels[i]) != NULL) {
            truth[i] = 1;
            ++hits;
        }
    }
    if (hits == 1) return;   // exactly one label matched: all good
    printf("Too many or too few labels: %d, %s\n", hits, path);
    int shown = 0;
    for (i = 0; i < k; ++i) {
        if (strstr(path, labels[i]) != NULL) {
            printf("\t label %d: %s \n", shown, labels[i]);
            ++shown;
        }
    }
}
/// Propagate ground-truth flags up a class hierarchy, then mark every
/// group that contains no positive with SECRET_NUM so the loss can
/// ignore those outputs.
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    // Switch on the whole ancestor chain of every positive label.
    for (j = 0; j < k; ++j) {
        if (!truth[j]) continue;
        int parent = hierarchy->parent[j];
        while (parent >= 0) {
            truth[parent] = 1;
            parent = hierarchy->parent[parent];
        }
    }
    // Groups with no positive member get the "don't care" marker.
    int offset = 0;
    for (j = 0; j < hierarchy->groups; ++j) {
        int i;
        int any_positive = 0;
        for (i = 0; i < hierarchy->group_size[j]; ++i) {
            if (truth[offset + i]) { any_positive = 1; break; }
        }
        if (!any_positive) {
            for (i = 0; i < hierarchy->group_size[j]; ++i) {
                truth[offset + i] = SECRET_NUM;
            }
        }
        offset += hierarchy->group_size[j];
    }
}
/// Build the label matrix for n paths: each row is a substring-match
/// one-hot encoding, optionally expanded through a class hierarchy.
/// A NULL `labels` yields an all-zero matrix.
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
    matrix y = make_matrix(n, k);
    int i;
    for (i = 0; i < n && labels; ++i) {
        fill_truth(paths[i], labels, k, y.vals[i]);
        if (hierarchy) fill_hierarchy(y.vals[i], k, hierarchy);
    }
    return y;
}
/// Read multi-label tags for each image. The label file path is derived
/// from the image path ("imgs" -> "labels", "_iconl.jpeg" -> ".txt"),
/// with a "labels2" fallback. Every integer tag < k sets one column of
/// y for that row. Prints "<found>/<n>" label files located.
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int found = 0;
    int i;
    for (i = 0; i < n; ++i) {
        char label[4096];
        find_replace(paths[i], "imgs", "labels", label);
        find_replace(label, "_iconl.jpeg", ".txt", label);
        FILE *file = fopen(label, "r");
        if (!file) {
            // Fallback location for the label file.
            find_replace(label, "labels", "labels2", label);
            file = fopen(label, "r");
            if (!file) continue;
        }
        ++found;
        int tag;
        while (fscanf(file, "%d", &tag) == 1) {
            if (tag < k) y.vals[i][tag] = 1;
        }
        fclose(file);
    }
    printf("%d/%d\n", found, n);
    return y;
}
/// Read one label name per line from `filename`; optionally report the
/// line count through *size. Caller owns the returned array.
char **get_labels_custom(char *filename, int *size)
{
    list *name_list = get_paths(filename);
    if (size) *size = name_list->size;
    char **labels = (char **)list_to_array(name_list);
    free_list(name_list);
    return labels;
}
/// Convenience wrapper around get_labels_custom() for callers that do
/// not need the label count.
char **get_labels(char *filename)
{
    return get_labels_custom(filename, NULL);
}
/// Release a data record. Deep records free the full matrices; shallow
/// records only drop the row-pointer arrays (rows are owned elsewhere).
void free_data(data d)
{
    if (d.shallow) {
        free(d.X.vals);
        free(d.y.vals);
        return;
    }
    free_matrix(d.X);
    free_matrix(d.y);
}
/*
** Load n jittered training samples for a region layer.
** Each image is randomly cropped, resized to w x h, optionally flipped
** and color-distorted; ground truth is written on a size x size grid of
** (5+classes) floats per cell.
** NOTE: the order of rand_uniform()/random_gen() calls per image is
** behavior-critical — do not reorder.
*/
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        int oh = orig.h;
        int ow = orig.w;
        /* Jitter each border independently by up to +/- jitter fraction. */
        int dw = (ow*jitter);
        int dh = (oh*jitter);
        int pleft = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop = rand_uniform(-dh, dh);
        int pbot = rand_uniform(-dh, dh);
        int swidth = ow - pleft - pright;
        int sheight = oh - ptop - pbot;
        /* Scale of the crop relative to the original image. */
        float sx = (float)swidth / ow;
        float sy = (float)sheight / oh;
        int flip = random_gen()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
        /* Normalized offset of the crop inside the original frame. */
        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;
        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data; /* matrix takes ownership of the pixels */
        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}
/// Load n image pairs for comparator training: each X row holds two
/// images concatenated (6 channels); each y row holds per-class targets
/// derived from the best IoU of each image, discretized to
/// {0, 1, SECRET_NUM} (SECRET_NUM marks ambiguous pairs).
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;
    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2], w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);
        // Pack both images into one 6-channel row.
        d.X.vals[i] = (float*)calloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));
        int id;
        float iou;
        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2], "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        // Guard: the original dereferenced fp1/fp2 without checking,
        // crashing whenever a label file was missing.
        FILE *fp1 = fopen(imlabel1, "r");
        if (fp1) {
            while(fscanf(fp1, "%d %f", &id, &iou) == 2){
                if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
            }
            fclose(fp1);
        }
        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");
        if (fp2) {
            while(fscanf(fp2, "%d %f", &id, &iou) == 2){
                if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
            }
            fclose(fp2);
        }
        // Discretize: a clear winner becomes 1/0; everything else is
        // marked SECRET_NUM so the loss can ignore it.
        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j] = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}
/*
** Load a single random jittered sample in the SWAG layout:
** one image row plus a 30-box (4+classes)-per-box truth row.
** NOTE: the sequence of random_gen()/rand_uniform() calls is
** behavior-critical — do not reorder.
*/
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    /* Pick one image uniformly at random from the n paths. */
    int index = random_gen()%n;
    char *random_path = paths[index];
    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    d.X.rows = 1;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    /* 30 boxes of [x,y,w,h,one-hot classes] each. */
    int k = (4+classes)*30;
    d.y = make_matrix(1, k);
    /* Jitter each border independently by up to +/- jitter fraction. */
    int dw = w*jitter;
    int dh = h*jitter;
    int pleft = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop = rand_uniform(-dh, dh);
    int pbot = rand_uniform(-dh, dh);
    int swidth = w - pleft - pright;
    int sheight = h - ptop - pbot;
    /* Scale of the crop relative to the original image. */
    float sx = (float)swidth / w;
    float sy = (float)sheight / h;
    int flip = random_gen()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
    /* Normalized offset of the crop inside the original frame. */
    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;
    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data; /* d takes ownership of the pixel buffer */
    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
    free_image(orig);
    free_image(cropped);
    return d;
}
/// Append the boxes from old_truth after those already present in
/// new_truth. Both arrays hold `boxes` records of 5 floats
/// [x, y, w, h, id]; a record whose x is 0 terminates the list.
/// Used when blending two images (mixup).
void blend_truth(float *new_truth, int boxes, float *old_truth)
{
    const int stride = 4 + 1;
    // Count boxes already present (x == 0 marks the end of the list).
    int filled = 0;
    while (filled < boxes && new_truth[filled * stride] != 0) ++filled;
    // Copy old boxes into the remaining slots until either list ends.
    int t;
    for (t = filled; t < boxes; ++t) {
        const float *src = old_truth + (t - filled) * stride;
        if (src[0] == 0) break;
        float *dst = new_truth + t * stride;
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
    }
}
#ifdef OPENCV
#include "http_stream.h"
/*
** OpenCV-backed batch loader for detection training.
** Picks n images (sequential "track" mode or uniformly random), applies
** crop / flip / HSV-distortion / optional blur augmentation, optionally
** blends two images per sample (mixup), and returns X (pixels) plus y
** (5*boxes truth floats per row).
** NOTE: the augmentation parameters are recomputed only when track==0
** or a new mixup pass begins; the order of random_* calls is
** behavior-critical — do not reorder.
*/
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup,
    float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? c : 3;  /* default to 3 channels when unspecified */
    char **random_paths;
    char **mixup_random_paths = NULL;
    if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
    else random_paths = get_random_paths(paths, n, m);
    /* Mixup is applied on a random half of the batches when enabled. */
    int mixup = use_mixup ? random_gen() % 2 : 0;
    if (mixup) {
        if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else mixup_random_paths = get_random_paths(paths, n, m);
    }
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;
    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0;
    int augmentation_calculated = 0;
    d.y = make_matrix(n, 5*boxes);
    int i_mixup = 0;
    /* Pass 0 loads the base images; pass 1 (if mixup) blends a second set. */
    for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0; // recalculate augmentation for the 2nd sequence if(track==1)
        for (i = 0; i < n; ++i) {
            float *truth = (float*)calloc(5 * boxes, sizeof(float));
            const char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];
            int flag = (c >= 3);
            mat_cv *src;
            src = load_image_mat_cv(filename, flag);
            if (src == NULL) {
                if (check_mistakes) getchar();
                continue;
            }
            int oh = get_height_mat(src);
            int ow = get_width_mat(src);
            int dw = (ow*jitter);
            int dh = (oh*jitter);
            /* Draw fresh augmentation parameters unless tracking reuses them. */
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();
                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);
                flip = use_flip ? random_gen() % 2 : 0;
                //blur = rand_int(0, 1) ? (use_blur) : 0;
                int tmp_blur = rand_int(0, 2); // 0 - disable, 1 - blur background, 2 - blur the whole image
                if (tmp_blur == 2) blur = use_blur;
                else blur = tmp_blur;
            }
            /* Per-border jitter derived from the pre-drawn uniforms. */
            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);
            float scale = rand_precalc_random(.25, 2, r_scale); // unused currently
            /* Letterboxing: pad the crop so the image aspect matches the net. */
            if (letter_box)
            {
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                if (result_ar > 1) // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh)/2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                }
                else // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow)/2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                }
            }
            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;
            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;
            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;
            int min_w_h = fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);
            if (min_w_h / 8 < blur && blur > 1) blur = min_w_h / 8; // disable blur if one of the objects is too small
            image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp,
                blur, boxes, d.y.vals[i]);
            /* Second pass: average pixels and append truth from the base image. */
            if (i_mixup) {
                image old_img = ai;
                old_img.data = d.X.vals[i];
                blend_images_cv(ai, 0.5, old_img, 0.5);
                blend_truth(truth, boxes, d.y.vals[i]);
                free_image(old_img);
            }
            d.X.vals[i] = ai.data;
            memcpy(d.y.vals[i], truth, 5*boxes * sizeof(float));
            /* Debug aid: dump (and optionally display) the augmented sample. */
            if (show_imgs)// && i_mixup) // delete i_mixup
            {
                image tmp_ai = copy_image(ai);
                char buff[1000];
                sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*ai.w;
                    int right = (b.x + b.w / 2.)*ai.w;
                    int top = (b.y - b.h / 2.)*ai.h;
                    int bot = (b.y + b.h / 2.)*ai.h;
                    draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(tmp_ai, buff);
                if (show_imgs == 1) {
                    show_image(tmp_ai, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
                free_image(tmp_ai);
            }
            release_mat(&src);
            free(truth);
        }
    }
    free(random_paths);
    if(mixup_random_paths) free(mixup_random_paths);
    return d;
}
#else // OPENCV
/// Alpha-blend old_img into new_img in place:
/// new = new*alpha + old*beta, element-wise over all pixels/channels.
void blend_images(image new_img, float alpha, image old_img, float beta)
{
    const int total = new_img.w * new_img.h * new_img.c;
    int i;
    #pragma omp parallel for
    for (i = 0; i < total; ++i) {
        new_img.data[i] = new_img.data[i] * alpha + old_img.data[i] * beta;
    }
}
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter,
float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
const int random_index = random_gen();
c = c ? c : 3;
char **random_paths;
char **mixup_random_paths = NULL;
if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
else random_paths = get_random_paths(paths, n, m);
int mixup = use_mixup ? random_gen() % 2 : 0;
//printf("\n mixup = %d \n", mixup);
if (mixup) {
if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
else mixup_random_paths = get_random_paths(paths, n, m);
}
int i;
data d = { 0 };
d.shallow = 0;
d.X.rows = n;
d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*c;
float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale;
float dhue = 0, dsat = 0, dexp = 0, flip = 0;
int augmentation_calculated = 0;
d.y = make_matrix(n, 5 * boxes);
int i_mixup = 0;
for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
if (i_mixup) augmentation_calculated = 0;
for (i = 0; i < n; ++i) {
float *truth = (float*)calloc(5 * boxes, sizeof(float));
char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];
image orig = load_image(filename, 0, 0, c);
int oh = orig.h;
int ow = orig.w;
int dw = (ow*jitter);
int dh = (oh*jitter);
if (!augmentation_calculated || !track)
{
augmentation_calculated = 1;
r1 = random_float();
r2 = random_float();
r3 = random_float();
r4 = random_float();
r_scale = random_float();
dhue = rand_uniform_strong(-hue, hue);
dsat = rand_scale(saturation);
dexp = rand_scale(exposure);
flip = use_flip ? random_gen() % 2 : 0;
}
int pleft = rand_precalc_random(-dw, dw, r1);
int pright = rand_precalc_random(-dw, dw, r2);
int ptop = rand_precalc_random(-dh, dh, r3);
int pbot = rand_precalc_random(-dh, dh, r4);
float scale = rand_precalc_random(.25, 2, r_scale); // unused currently
if (letter_box)
{
float img_ar = (float)ow / (float)oh;
float net_ar = (float)w / (float)h;
float result_ar = img_ar / net_ar;
//printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
if (result_ar > 1) // sheight - should be increased
{
float oh_tmp = ow / net_ar;
float delta_h = (oh_tmp - oh) / 2;
ptop = ptop - delta_h;
pbot = pbot - delta_h;
//printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
}
else // swidth - should be increased
{
float ow_tmp = oh * net_ar;
float delta_w = (ow_tmp - ow) / 2;
pleft = pleft - delta_w;
pright = pright - delta_w;
//printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
}
}
int swidth = ow - pleft - pright;
int sheight = oh - ptop - pbot;
float sx = (float)swidth / ow;
float sy = (float)sheight / oh;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft / ow) / sx;
float dy = ((float)ptop / oh) / sy;
image sized = resize_image(cropped, w, h);
if (flip) flip_image(sized);
distort_image(sized, dhue, dsat, dexp);
//random_distort_image(sized, hue, saturation, exposure);
fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);
if (i_mixup) {
image old_img = sized;
old_img.data = d.X.vals[i];
//show_image(sized, "new");
//show_image(old_img, "old");
//wait_until_press_key_cv();
blend_images(sized, 0.5, old_img, 0.5);
blend_truth(truth, boxes, d.y.vals[i]);
free_image(old_img);
}
d.X.vals[i] = sized.data;
memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));
if (show_imgs)// && i_mixup)
{
char buff[1000];
sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen());
int t;
for (t = 0; t < boxes; ++t) {
box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
if (!b.x) break;
int left = (b.x - b.w / 2.)*sized.w;
int right = (b.x + b.w / 2.)*sized.w;
int top = (b.y - b.h / 2.)*sized.h;
int bot = (b.y + b.h / 2.)*sized.h;
draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
}
save_image(sized, buff);
if (show_imgs == 1) {
show_image(sized, buff);
wait_until_press_key_cv();
}
printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Press Enter: \n");
//getchar();
}
free_image(orig);
free_image(cropped);
free(truth);
}
}
free(random_paths);
if (mixup_random_paths) free(mixup_random_paths);
return d;
}
#endif // OPENCV
/*
 * pthread entry point for a single loading job.
 * Copies the load_args found at *ptr, dispatches on a.type to the matching
 * loader, writes the result through a.d (or a.im / a.resized for the
 * single-image types), frees the heap-allocated argument copy, and returns 0.
 */
void *load_thread(void *ptr)
{
    load_args a = *(struct load_args*)ptr;
    /* zero means "unset": substitute the neutral augmentation factor */
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;
    switch(a.type){
        case OLD_CLASSIFICATION_DATA:
            *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
            break;
        case CLASSIFICATION_DATA:
            *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        case SUPER_DATA:
            *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
            break;
        case WRITING_DATA:
            *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
            break;
        case REGION_DATA:
            *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
            break;
        case DETECTION_DATA:
            *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.classes, a.flip, a.blur, a.mixup, a.jitter,
                a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.show_imgs);
            break;
        case SWAG_DATA:
            *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
            break;
        case COMPARE_DATA:
            *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
            break;
        case IMAGE_DATA:
            *(a.im) = load_image(a.path, 0, 0, a.c);
            *(a.resized) = resize_image(*(a.im), a.w, a.h);
            break;
        case LETTERBOX_DATA:
            *(a.im) = load_image(a.path, 0, 0, a.c);
            *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
            break;
        case TAG_DATA:
            *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        default:
            /* unknown type: nothing to load (matches the original if/else chain) */
            break;
    }
    free(ptr);
    return 0;
}
/*
 * Start a worker thread running load_thread on a heap copy of args.
 * The worker frees the copy; the caller must pthread_join the returned
 * handle to wait for the data.
 */
pthread_t load_data_in_thread(load_args args)
{
    struct load_args* copy = (load_args*)calloc(1, sizeof(struct load_args));
    pthread_t worker;
    *copy = args;
    if(0 != pthread_create(&worker, 0, load_thread, copy)) error("Thread creation failed");
    return worker;
}
/*
 * Coordinator thread: split a load request of args.n examples across
 * args.threads worker threads, join them, and concatenate their partial
 * results into *args.d.  Frees the heap-allocated argument copy at *ptr.
 */
void *load_threads(void *ptr)
{
    load_args args = *(load_args *)ptr;
    free(ptr);
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    data* parts = (data*)calloc(args.threads, sizeof(data));
    pthread_t* workers = (pthread_t*)calloc(args.threads, sizeof(pthread_t));
    int t;
    for(t = 0; t < args.threads; ++t){
        args.d = parts + t;
        /* partition `total` as evenly as possible using cumulative integer division */
        args.n = (t+1) * total / args.threads - t * total / args.threads;
        workers[t] = load_data_in_thread(args);
    }
    for(t = 0; t < args.threads; ++t){
        pthread_join(workers[t], 0);
    }
    *out = concat_datas(parts, args.threads);
    out->shallow = 0;
    for(t = 0; t < args.threads; ++t){
        /* row data is now owned by *out: free only each part's pointer arrays */
        parts[t].shallow = 1;
        free_data(parts[t]);
    }
    free(parts);
    free(workers);
    return 0;
}
/*
 * Start the multi-threaded loader (load_threads) on a heap copy of args.
 * Returns the coordinator thread handle; the caller joins it when the
 * batch is needed.
 */
pthread_t load_data(load_args args)
{
    struct load_args* copy = (load_args*)calloc(1, sizeof(struct load_args));
    pthread_t coordinator;
    *copy = args;
    if(0 != pthread_create(&coordinator, 0, load_threads, copy)) error("Thread creation failed");
    return coordinator;
}
/*
 * Build a batch for image-to-image training: X holds the input images,
 * y holds their grayscale "-label.png" companions at out_w x out_h.
 * When m > 0, n paths are sampled at random from the pool of m.
 */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    int i;
    if(m) paths = get_random_paths(paths, n, m);
    /* derive each label path: foo.png -> foo-label.png */
    char **label_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(label_paths, n, out_w, out_h);
    if(m) free(paths);
    for(i = 0; i < n; ++i){
        free(label_paths[i]);
    }
    free(label_paths);
    return d;
}
/*
 * Classic classification batch: X = images resized to w x h,
 * y = labels matched from the path names (no label hierarchy).
 * When m > 0, n paths are sampled at random from the pool of m.
 */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    char **selected = paths;
    if(m) selected = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(selected, n, w, h);
    d.y = load_labels_paths(selected, n, labels, k, 0);
    if(m) free(selected);
    return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
/*
 * Super-resolution batch: for each image, take a random (w*scale) x (h*scale)
 * crop as the target (y) and its w x h downscaled version as the input (X).
 * A random horizontal flip is applied to the crop before downscaling.
 */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    int i;
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.cols = w*h*3;
    d.X.vals = (float**)calloc(n, sizeof(float*));
    d.y.rows = n;
    d.y.cols = w*scale * h*scale * 3;
    d.y.vals = (float**)calloc(n, sizeof(float*));
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        if(random_gen() % 2) flip_image(crop);
        image small = resize_image(crop, w, h);
        d.X.vals[i] = small.data;  /* low-res input (ownership transferred) */
        d.y.vals[i] = crop.data;   /* high-res target (ownership transferred) */
        free_image(im);
    }
    if(m) free(paths);
    return d;
}
/*
 * Augmented classification batch: X = images with random crop/flip/HSV
 * distortion, y = labels matched from paths (optionally via a label
 * hierarchy tree).  When m > 0, n paths are sampled at random from m.
 */
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **selected = paths;
    if(m) selected = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(selected, n, use_flip, min, max, size, angle, aspect, hue, saturation, exposure);
    d.y = load_labels_paths(selected, n, labels, k, hierarchy);
    if(m) free(selected);
    return d;
}
/*
 * Tagging batch (multi-label): X = augmented images, y = tag vectors
 * read per path.  d.w/d.h record the network input size.
 * When m > 0, n paths are sampled at random from the pool of m.
 */
data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **selected = m ? get_random_paths(paths, n, m) : paths;
    data d = {0};
    d.w = size;
    d.h = size;
    d.shallow = 0;
    d.X = load_image_augment_paths(selected, n, use_flip, min, max, size, angle, aspect, hue, saturation, exposure);
    d.y = load_tags_paths(selected, n, k);
    if(m) free(selected);
    return d;
}
/*
 * Stack m2's rows below m1's.  Only the row-pointer array is newly
 * allocated; the row data itself is shared with m1 and m2.
 */
matrix concat_matrix(matrix m1, matrix m2)
{
    matrix stacked;
    int i;
    stacked.cols = m1.cols;
    stacked.rows = m1.rows + m2.rows;
    stacked.vals = (float**)calloc(stacked.rows, sizeof(float*));
    for(i = 0; i < m1.rows; ++i) stacked.vals[i] = m1.vals[i];
    for(i = 0; i < m2.rows; ++i) stacked.vals[m1.rows + i] = m2.vals[i];
    return stacked;
}
/*
 * Concatenate two data sets by stacking X and y row pointers.
 * The result is shallow: its rows are borrowed from d1 and d2.
 */
data concat_data(data d1, data d2)
{
    data joined = {0};
    joined.shallow = 1;
    joined.X = concat_matrix(d1.X, d2.X);
    joined.y = concat_matrix(d1.y, d2.y);
    return joined;
}
/*
 * Concatenate n data sets into one by repeatedly merging d[i] into the
 * running result.  Each pass reallocates the row-pointer arrays (O(n^2)
 * pointer copies overall) while the row data stays shared, so freeing the
 * shallow intermediate releases only the old pointer arrays.
 * NOTE(review): the first free_data call sees the zero-initialized `out`
 * (shallow == 0, NULL vals) — assumes free_data tolerates that; confirm.
 * The returned data is shallow (rows borrowed from d[0..n-1]); callers
 * such as load_threads reset out->shallow themselves.
 */
data concat_datas(data *d, int n)
{
    int i;
    data out = {0};
    for(i = 0; i < n; ++i){
        data newdata = concat_data(d[i], out);
        free_data(out);
        out = newdata;
    }
    return out;
}
/*
 * Load a CSV file as a data set: column `target` is removed from X and
 * one-hot encoded into y with k classes; the remaining columns form X.
 */
data load_categorical_data_csv(char *filename, int target, int k)
{
    data d = {0};
    d.shallow = 0;
    matrix X = csv_to_matrix(filename);
    /* pull the label column out of X, then expand it to one-hot rows */
    float *labels = pop_column(&X, target);
    matrix y;
    y.rows = X.rows;
    y.cols = k;
    y.vals = one_hot_encode(labels, X.rows, k);
    free(labels);
    d.X = X;
    d.y = y;
    return d;
}
/*
 * Load one CIFAR-10 binary batch file (10000 records of 1 label byte +
 * 3072 pixel bytes).  X gets the pixels scaled to [0,1]; y gets one-hot
 * labels.
 * Fix: the fread result was previously ignored, so a truncated or
 * corrupt batch file silently produced garbage rows; now it aborts via
 * file_error like the fopen failure path.
 */
data load_cifar10_data(char *filename)
{
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];
        if(fread(bytes, 1, 3073, fp) != 3073) file_error(filename); /* short read: truncated file */
        int class_id = bytes[0];
        y.vals[i][class_id] = 1;
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    scale_data_rows(d, 1./255);
    fclose(fp);
    return d;
}
/*
 * Fill X/y with n examples drawn uniformly at random (with replacement)
 * from d.  X and y must hold n*d.X.cols and n*d.y.cols floats.
 */
void get_random_batch(data d, int n, float *X, float *y)
{
    int j;
    for(j = 0; j < n; ++j){
        int row = random_gen() % d.X.rows;
        memcpy(X + j*d.X.cols, d.X.vals[row], d.X.cols*sizeof(float));
        memcpy(y + j*d.y.cols, d.y.vals[row], d.y.cols*sizeof(float));
    }
}
/*
 * Copy the n consecutive examples starting at `offset` from d into X/y.
 * No bounds check: the caller guarantees offset+n <= d.X.rows.
 */
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    int j;
    for(j = 0; j < n; ++j){
        memcpy(X + j*d.X.cols, d.X.vals[offset + j], d.X.cols*sizeof(float));
        memcpy(y + j*d.y.cols, d.y.vals[offset + j], d.y.cols*sizeof(float));
    }
}
/*
 * Label smoothing in place: y <- eps/k + (1-eps)*y with eps = 0.1 and
 * k = d.y.cols, pulling each one-hot target slightly toward uniform.
 */
void smooth_data(data d)
{
    int r, c;
    float scale = 1. / d.y.cols;
    float eps = .1;
    for(r = 0; r < d.y.rows; ++r){
        float *row = d.y.vals[r];
        for(c = 0; c < d.y.cols; ++c){
            row[c] = eps * scale + (1-eps) * row[c];
        }
    }
}
/*
 * Load all five CIFAR-10 training batches (50000 records total) from
 * data/cifar/cifar-10-batches-bin/, scale pixels to [0,1], and apply
 * label smoothing.
 * Fix: the fread result was previously ignored, so a truncated batch
 * file silently produced garbage rows; now it aborts via file_error.
 */
data load_all_cifar10()
{
    data d = {0};
    d.shallow = 0;
    int i,j,b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;
    for(b = 0; b < 5; ++b){
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
        FILE *fp = fopen(buff, "rb");
        if(!fp) file_error(buff);
        for(i = 0; i < 10000; ++i){
            unsigned char bytes[3073];
            if(fread(bytes, 1, 3073, fp) != 3073) file_error(buff); /* short read: truncated file */
            int class_id = bytes[0];
            y.vals[i+b*10000][class_id] = 1;
            for(j = 0; j < X.cols; ++j){
                X.vals[i+b*10000][j] = (double)bytes[j+1];
            }
        }
        fclose(fp);
    }
    scale_data_rows(d, 1./255);
    smooth_data(d);
    return d;
}
/*
 * Load a Go training file.  Each record is two lines: "row col" (the move
 * played) followed by a board string where '1' marks one color's stones
 * and '2' the other's.  X gets +1/-1/0 per board point; y gets a one-hot
 * of the move index row*19+col.  Matrices grow by doubling and are
 * trimmed to the real record count at the end.
 * Fixes: the board line and the sscanf result were previously unchecked —
 * a truncated file crashed on a NULL board, and a malformed move line
 * used stale row/col values; out-of-range move indices also wrote past
 * the 361-column y row.  Malformed records are now skipped.
 */
data load_go(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    matrix X = make_matrix(3363059, 361);
    matrix y = make_matrix(3363059, 361);
    int row, col;
    if(!fp) file_error(filename);
    char *label;
    int count = 0;
    while((label = fgetl(fp))){
        int i;
        if(count == X.rows){
            X = resize_matrix(X, count*2);
            y = resize_matrix(y, count*2);
        }
        char *board = fgetl(fp);
        if(!board){
            /* truncated file: move line without a board line */
            free(label);
            break;
        }
        if(sscanf(label, "%d %d", &row, &col) == 2 && strlen(board) >= 19*19){
            int index = row*19 + col;
            if(index >= 0 && index < 361) y.vals[count][index] = 1;
            for(i = 0; i < 19*19; ++i){
                float val = 0;
                if(board[i] == '1') val = 1;
                else if(board[i] == '2') val = -1;
                X.vals[count][i] = val;
            }
            ++count;
        }
        free(label);
        free(board);
    }
    X = resize_matrix(X, count);
    y = resize_matrix(y, count);
    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;
    fclose(fp);
    return d;
}
/*
 * In-place Fisher-Yates shuffle of d's rows, keeping each X row paired
 * with its y row.
 * Fix: the original drew `random_gen()%i`, which can never pick j == i,
 * so no element could stay in place — a biased (non-uniform) shuffle.
 * Correct Fisher-Yates draws j uniformly from [0, i].
 */
void randomize_data(data d)
{
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        int index = random_gen()%(i+1);
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;
        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}
/* Multiply every element of every X row by s (labels untouched). */
void scale_data_rows(data d, float s)
{
    int r;
    for(r = 0; r < d.X.rows; ++r) scale_array(d.X.vals[r], d.X.cols, s);
}
/* Add s to every element of every X row (labels untouched). */
void translate_data_rows(data d, float s)
{
    int r;
    for(r = 0; r < d.X.rows; ++r) translate_array(d.X.vals[r], d.X.cols, s);
}
/* Normalize each X row in place via normalize_array (labels untouched). */
void normalize_data_rows(data d)
{
    int r;
    for(r = 0; r < d.X.rows; ++r) normalize_array(d.X.vals[r], d.X.cols);
}
/*
 * Return a shallow view of partition `part` of `total` equal-as-possible
 * slices of d: rows [rows*part/total, rows*(part+1)/total).  No memory is
 * copied; the view aliases d's row pointers.
 */
data get_data_part(data d, int part, int total)
{
    data p = {0};
    p.shallow = 1;
    int x_begin = d.X.rows * part / total;
    int x_end   = d.X.rows * (part + 1) / total;
    int y_begin = d.y.rows * part / total;
    int y_end   = d.y.rows * (part + 1) / total;
    p.X.rows = x_end - x_begin;
    p.y.rows = y_end - y_begin;
    p.X.cols = d.X.cols;
    p.y.cols = d.y.cols;
    p.X.vals = d.X.vals + x_begin;
    p.y.vals = d.y.vals + y_begin;
    return p;
}
/*
 * Sample `num` rows of d uniformly at random, with replacement.
 * The result is shallow: new pointer arrays, shared row data.
 */
data get_random_data(data d, int num)
{
    data sample = {0};
    int k;
    sample.shallow = 1;
    sample.X.rows = sample.y.rows = num;
    sample.X.cols = d.X.cols;
    sample.y.cols = d.y.cols;
    sample.X.vals = (float**)calloc(num, sizeof(float*));
    sample.y.vals = (float**)calloc(num, sizeof(float*));
    for(k = 0; k < num; ++k){
        int row = random_gen() % d.X.rows;
        sample.X.vals[k] = d.X.vals[row];
        sample.y.vals[k] = d.y.vals[row];
    }
    return sample;
}
/*
 * Split d into two shallow data sets: split[1] (test) holds rows
 * [start, end) where start = part*rows/total and end = (part+1)*rows/total;
 * split[0] (train) holds all remaining rows.  Row data is shared with d.
 * The caller frees the returned 2-element array.
 * Fix: `train` and `test` were declared uninitialized, leaving every
 * member not explicitly assigned below (w, h, ...) holding garbage; they
 * are now zero-initialized like every other `data` in this file.
 */
data *split_data(data d, int part, int total)
{
    data* split = (data*)calloc(2, sizeof(data));
    int i;
    int start = part*d.X.rows/total;
    int end = (part+1)*d.X.rows/total;
    data train = {0};
    data test = {0};
    train.shallow = test.shallow = 1;
    test.X.rows = test.y.rows = end-start;
    train.X.rows = train.y.rows = d.X.rows - (end-start);
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;
    train.X.vals = (float**)calloc(train.X.rows, sizeof(float*));
    test.X.vals = (float**)calloc(test.X.rows, sizeof(float*));
    train.y.vals = (float**)calloc(train.y.rows, sizeof(float*));
    test.y.vals = (float**)calloc(test.y.rows, sizeof(float*));
    for(i = 0; i < start; ++i){
        train.X.vals[i] = d.X.vals[i];
        train.y.vals[i] = d.y.vals[i];
    }
    for(i = start; i < end; ++i){
        test.X.vals[i-start] = d.X.vals[i];
        test.y.vals[i-start] = d.y.vals[i];
    }
    for(i = end; i < d.X.rows; ++i){
        train.X.vals[i-(end-start)] = d.X.vals[i];
        train.y.vals[i-(end-start)] = d.y.vals[i];
    }
    split[0] = train;
    split[1] = test;
    return split;
}
|
pooling_3x3_pack4.h |
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Max pooling, 3x3 kernel, stride 2, for pack-4 layout (each logical
// element is 4 floats, processed with one SSE lane-wise max).
// Assumes top_blob is pre-allocated and bottom_blob already padded so
// that every 3x3 window read below is in bounds — TODO confirm caller.
static void pooling3x3s2_max_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    // After producing one output row the row pointers have advanced by
    // 2*outw input columns; skip the leftover columns plus one extra
    // input row (vertical stride 2), counted in floats (4 per column).
    const int tailstep = (w - 2 * outw + w) * 4;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);
        // the three consecutive input rows feeding the current output row
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // main loop: two output columns per iteration, reading input
            // columns 0..4 (offsets 0..16 floats) of each of the 3 rows
            for (; j + 1 < outw; j += 2)
            {
                __m128 _r00 = _mm_loadu_ps(r0);
                __m128 _r01 = _mm_loadu_ps(r0 + 4);
                __m128 _r02 = _mm_loadu_ps(r0 + 8);
                __m128 _r10 = _mm_loadu_ps(r1);
                __m128 _r11 = _mm_loadu_ps(r1 + 4);
                __m128 _r12 = _mm_loadu_ps(r1 + 8);
                __m128 _r20 = _mm_loadu_ps(r2);
                __m128 _r21 = _mm_loadu_ps(r2 + 4);
                __m128 _r22 = _mm_loadu_ps(r2 + 8);
                // first output column: lane-wise max over the 9 taps
                // r{0,1,2}{0,1,2}, accumulated in two partial maxima
                __m128 _max00 = _mm_max_ps(_r00, _r01);
                _max00 = _mm_max_ps(_max00, _r02);
                _max00 = _mm_max_ps(_max00, _r10);
                _max00 = _mm_max_ps(_max00, _r11);
                __m128 _max01 = _mm_max_ps(_r12, _r20);
                _max01 = _mm_max_ps(_max01, _r21);
                _max01 = _mm_max_ps(_max01, _r22);
                __m128 _r03 = _mm_loadu_ps(r0 + 12);
                __m128 _r04 = _mm_loadu_ps(r0 + 16);
                __m128 _r13 = _mm_loadu_ps(r1 + 12);
                __m128 _r14 = _mm_loadu_ps(r1 + 16);
                __m128 _r23 = _mm_loadu_ps(r2 + 12);
                __m128 _r24 = _mm_loadu_ps(r2 + 16);
                _mm_storeu_ps(outptr, _mm_max_ps(_max00, _max01));
                // second output column: taps r{0,1,2}{2,3,4}.  Note that
                // _r24 and _r22 are folded into _max10 rather than _max11;
                // since max is associative/commutative the final
                // max(_max10, _max11) still covers all nine taps.
                __m128 _max10 = _mm_max_ps(_r03, _r04);
                _max10 = _mm_max_ps(_max10, _r02);
                _max10 = _mm_max_ps(_max10, _r13);
                _max10 = _mm_max_ps(_max10, _r14);
                __m128 _max11 = _mm_max_ps(_r12, _r23);
                _max10 = _mm_max_ps(_max10, _r24);
                _max10 = _mm_max_ps(_max10, _r22);
                _mm_storeu_ps(outptr + 4, _mm_max_ps(_max10, _max11));
                r0 += 16;
                r1 += 16;
                r2 += 16;
                outptr += 8;
            }
            // tail: one output column at a time
            for (; j < outw; j++)
            {
                __m128 _r00 = _mm_loadu_ps(r0);
                __m128 _r01 = _mm_loadu_ps(r0 + 4);
                __m128 _r02 = _mm_loadu_ps(r0 + 8);
                __m128 _r10 = _mm_loadu_ps(r1);
                __m128 _r11 = _mm_loadu_ps(r1 + 4);
                __m128 _r12 = _mm_loadu_ps(r1 + 8);
                __m128 _r20 = _mm_loadu_ps(r2);
                __m128 _r21 = _mm_loadu_ps(r2 + 4);
                __m128 _r22 = _mm_loadu_ps(r2 + 8);
                __m128 _max0 = _mm_max_ps(_r00, _r01);
                _max0 = _mm_max_ps(_max0, _r02);
                _max0 = _mm_max_ps(_max0, _r10);
                _max0 = _mm_max_ps(_max0, _r11);
                __m128 _max1 = _mm_max_ps(_r12, _r20);
                _max1 = _mm_max_ps(_max1, _r21);
                _max1 = _mm_max_ps(_max1, _r22);
                _mm_storeu_ps(outptr, _mm_max_ps(_max0, _max1));
                r0 += 8;
                r1 += 8;
                r2 += 8;
                outptr += 4;
            }
            // advance to the next pair of input rows (vertical stride 2)
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
|
GB_binop__lt_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__lt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__lt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_uint16)
// A*D function (colscale): GB (_AxD__lt_uint16)
// D*A function (rowscale): GB (_DxB__lt_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_uint16)
// C=scalar+B GB (_bind1st__lt_uint16)
// C=scalar+B' GB (_bind1st_tran__lt_uint16)
// C=A+scalar GB (_bind2nd__lt_uint16)
// C=A'+scalar GB (_bind2nd_tran__lt_uint16)
// C type: bool
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_UINT16 || GxB_NO_LT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Dead code (auto-generated stub): the dense C += A+B kernel is only
// emitted for accumulator-style ops; LT is a comparator, so the whole
// function is compiled out and its name is "(none)".
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  The loop body comes from the
// shared template, specialized via the GB_* macros above for z = (x < y)
// with uint16_t inputs and bool output.
void GB (_Cdense_ewise3_noaccum__lt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix into a dense matrix.  For this
// operator the template is compiled out (#if 0), leaving a stub that
// returns GrB_SUCCESS — presumably the generic dispatcher never selects
// it for a comparator op (generated code; do not edit by hand).
GrB_Info GB (_Cdense_accumB__lt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense matrix.  As with accumB
// above, the template is compiled out (#if 0) for this operator and the
// function is a success-returning stub (generated code; do not edit).
GrB_Info GB (_Cdense_accumb__lt_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by the diagonal matrix D.  Cx is bool because
// the LT operator's output type is bool; the loop comes from the shared
// colscale template specialized by the GB_* macros above.
GrB_Info GB (_AxD__lt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by the diagonal matrix D; mirror image of _AxD
// above, using the shared rowscale template.
GrB_Info GB (_DxB__lt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B with optional mask M (structural and/or complemented).
// For eWiseUnion the alpha/beta scalars stand in for entries present in
// only one of A or B; they are read only when is_eWiseUnion is true.
// The actual loops come from GB_add_template.c.
GrB_Info GB (_AaddB__lt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B (optionally masked) where C is
// sparse/hypersparse.  The loops come from GB_emult_08_meta.c,
// specialized by the GB_* macros above.
GrB_Info GB (_AemultB_08__lt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for LT (a flipped LT is handled by
// the generator as GT), so only the unflipped branch is compiled here.
GrB_Info GB (_AemultB_02__lt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both
// A and B are bitmap/full.  Loops come from GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__lt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is bitmap.
// Loops come from GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__lt_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op(x, Bx): apply z = (x < bij) across all bnz entries of B with
// the scalar x bound as the first argument.  Entries for which
// GBB(Bb,p) is false are skipped (Bb is presumably B's bitmap, with
// NULL meaning all-present — per the GBB macro; confirm in GB.h).
GrB_Info GB (_bind1st__lt_uint16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op(Ax, y): apply z = (aij < y) across all anz entries of A with
// the scalar y bound as the second argument; mirror image of _bind1st.
GrB_Info GB (_bind2nd__lt_uint16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x < aij) ; \
}
// C = op(x, A'): transpose A and apply the operator with the scalar x
// bound as the first argument.  GB_ATYPE is temporarily redefined because
// GB_unop_transpose.c reads it as the type of its single matrix input,
// which here is the operator's *second* argument.
GrB_Info GB (_bind1st_tran__lt_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij < y) ; \
}
// C = op(A', y): transpose A and apply the operator with the scalar y
// bound as the second argument.  Here A really is the first operator
// input, so GB_ATYPE needs no redefinition (contrast with _bind1st_tran).
GrB_Info GB (_bind2nd_tran__lt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distort.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/shear.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
  /*
    Re-order the external affine arguments sx,ry,rx,sy,tx,ty into the
    internal coefficient order c0,c2,c4,c1,c3,c5.  Entries 0 and 5 are
    already in place; entries 1,2,4,3 form a single 4-cycle, rotated here
    with one temporary instead of a scratch array.
  */
  const double first=affine[1];
  affine[1]=affine[2];   /* new [1] = old [2] */
  affine[2]=affine[4];   /* new [2] = old [4] */
  affine[4]=affine[3];   /* new [4] = old [3] */
  affine[3]=first;       /* new [3] = old [1] */
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /*
    Inverse of AffineArgsToCoefficients(): re-order the internal
    coefficients c0,c1,c2,c3,c4,c5 back into the external affine argument
    order sx,ry,rx,sy,tx,ty.  Entries 0 and 5 stay put; entries 1,3,4,2
    form a 4-cycle, rotated with a single temporary.
  */
  const double first=coeff[1];
  coeff[1]=coeff[3];   /* new [1] = old [3] */
  coeff[3]=coeff[4];   /* new [3] = old [4] */
  coeff[4]=coeff[2];   /* new [4] = old [2] */
  coeff[2]=first;      /* new [2] = old [1] */
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /*
    Invert a six-coefficient affine transform (from "Digital Image
    Warping" by George Wolberg, page 50), where the forward mapping is
    u = c0*x + c1*y + c2 and v = c3*x + c4*y + c5.  The 2x2 determinant
    is inverted via PerceptibleReciprocal() -- see its definition for how
    a zero (singular) determinant is handled.
  */
  const double det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);

  /* row producing the inverse u(x,y) */
  inverse[0]=det*coeff[4];
  inverse[1]=det*(-coeff[1]);
  inverse[2]=det*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  /* row producing the inverse v(x,y) */
  inverse[3]=det*(-coeff[3]);
  inverse[4]=det*coeff[0];
  inverse[5]=det*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
static void InvertPerspectiveCoefficients(const double *coeff,
double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 53 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]);
inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]);
inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]);
inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
* the 2 dimensional polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
  /*
    Return the number of terms in a 2d polynomial of the given order, or 0
    for an invalid order.  Valid orders are the integers 1 to 5, or the
    special value 1.5 (bilinear) -- see the term table in the comment block
    above this function.

    BUG FIX: the test was previously '(order-1.5) > MagickEpsilon', which is
    false for ANY non-integer order below 1.5 (e.g. 1.2), silently accepting
    orders the documented contract forbids.  Using fabs() rejects every
    non-integer order except 1.5 itself.
  */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && fabs(order-1.5) > MagickEpsilon) )
    return 0; /* invalid polynomial order */
  /* a 2d polynomial of order n has (n+1)(n+2)/2 terms */
  return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
  /*
    Evaluate the n'th 2d polynomial basis term at (x,y).  Term n is the
    monomial x^x_power[n] * y^y_power[n], with the term ordering documented
    above poly_number_terms() -- note the bilinear x*y term is slotted in at
    n=3, ahead of the pure quadratic terms.  Any n outside the implemented
    range 0..20 (quintic) evaluates to 0.
  */
  static const int x_power[21] =
    { 0, 1, 0, 1, 2, 0, 3, 2, 1, 0, 4, 3, 2, 1, 0, 5, 4, 3, 2, 1, 0 };
  static const int y_power[21] =
    { 0, 0, 1, 1, 0, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5 };

  double
    product;

  int
    k;

  if ((n < 0) || (n > 20))
    return(0.0); /* should never happen */
  /* multiply x factors first, then y factors, matching the original
     left-to-right evaluation order exactly */
  product=1.0;
  for (k=0; k < x_power[n]; k++)
    product*=x;
  for (k=0; k < y_power[n]; k++)
    product*=y;
  return(product);
}
static const char *poly_basis_str(ssize_t n)
{
  /*
    Return the printable factor string for the n'th polynomial basis term
    ("ii" and "jj" being the two coordinate variables).  The ordering
    matches poly_basis_fn(); any n outside 0..20 yields "UNKNOWN".
  */
  static const char *const factors[21] =
    {
      "",                                                  /* constant */
      "*ii", "*jj",                                        /* affine */
      "*ii*jj",                                            /* bilinear */
      "*ii*ii", "*jj*jj",                                  /* quadratic */
      "*ii*ii*ii", "*ii*ii*jj", "*ii*jj*jj", "*jj*jj*jj",  /* cubic */
      "*ii*ii*ii*ii", "*ii*ii*ii*jj", "*ii*ii*jj*jj",
      "*ii*jj*jj*jj", "*jj*jj*jj*jj",                      /* quartic */
      "*ii*ii*ii*ii*ii", "*ii*ii*ii*ii*jj", "*ii*ii*ii*jj*jj",
      "*ii*ii*jj*jj*jj", "*ii*jj*jj*jj*jj", "*jj*jj*jj*jj*jj" /* quintic */
    };

  if ((n < 0) || (n > 20))
    return( "UNKNOWN" ); /* should never happen */
  return(factors[n]);
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
  /*
    Partial derivative, with respect to x, of the n'th basis term from
    poly_basis_fn().  NOTE: the constant multiplier of a true derivative is
    deliberately omitted -- d(x*x)/dx is reported as x, not 2*x -- exactly
    matching the case table this replaces.  Terms with no x factor, and any
    n outside 0..20, evaluate to 0.
  */
  static const int x_power[21] =
    { 0, 1, 0, 1, 2, 0, 3, 2, 1, 0, 4, 3, 2, 1, 0, 5, 4, 3, 2, 1, 0 };
  static const int y_power[21] =
    { 0, 0, 1, 1, 0, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5 };

  double
    product;

  int
    k;

  if ((n < 0) || (n > 20) || (x_power[n] == 0))
    return(0.0);
  product=1.0;
  for (k=1; k < x_power[n]; k++)  /* one power of x is consumed by d/dx */
    product*=x;
  for (k=0; k < y_power[n]; k++)
    product*=y;
  return(product);
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
  /*
    Partial derivative, with respect to y, of the n'th basis term.  As with
    poly_basis_dx(), constant multipliers are omitted.  From n=6 onward the
    term ordering makes the y derivative of term n equal the x derivative of
    term n-1 ("weird but true"); only the first six terms need special
    handling, because the bilinear x*y term (n=3) was re-ordered ahead of
    the pure quadratic terms.
  */
  if ((n < 0) || (n >= 6))
    return( poly_basis_dx(n-1,x,y) );
  if (n == 2)
    return( 1.0 );  /* d(y)/dy */
  if (n == 3)
    return( x );    /* d(x*y)/dy */
  if (n == 5)
    return( y );    /* d(y*y)/dy, multiplier omitted */
  return( 0.0 );    /* n = 0, 1, 4: no y factor */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];

  Image
    *deskew_image;

  /*
    Affine transform image: repackage the affine matrix as the six
    arguments of an AffineProjection distortion and delegate the work to
    DistortImage().  Returns the new image, or NULL on failure (errors are
    reported through 'exception').
  */
  /* FIX: assert image is non-NULL before it is dereferenced below, matching
     the NULL asserts already applied to affine_matrix and exception */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  /* MagickTrue here is presumably DistortImage's 'bestfit' flag -- verify
     against the DistortImage() declaration in distort.h */
  deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(deskew_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% double *GenerateCoefficients(const Image *image,DistortImageMethod *method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usually r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
% Because of this, this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
DistortImageMethod *method,const size_t number_arguments,
const double *arguments,size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
register size_t
i;
size_t
number_coeff, /* number of coefficients to return (array size) */
cp_size, /* number floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
cp_size = number_values+2; /* each CP defintion involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coeff=0;
switch (*method) {
case AffineDistortion:
/* also BarycentricColorInterpolate: */
number_coeff=3*number_values;
break;
case PolynomialDistortion:
/* number of coefficents depend on the given polynomal 'order' */
i = poly_number_terms(arguments[0]);
number_coeff = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coeff=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coeff=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadraterialDistortion:
number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
break;
case ShepardsDistortion:
number_coeff=1; /* The power factor to use */
break;
case ArcDistortion:
number_coeff=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coeff=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coeff=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coeff=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coeff=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
if (coeff == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coeff; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usally 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(3UL,3UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y] +
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordients too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in there inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with a 2 number_values for Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficents */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
+ Will only work with a 2 number_valuesal Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existance! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separatable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaniously
+ Will only work with a 2 number_valuesal Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */
terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */
terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficents (forward mapping)
*/
if (number_arguments != 8) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image cocodinate in destination for determination.
For a forward mapped perspective the images 0,0 coord will map to
c2,c5 in the distorted image, so set the sign of denominator of that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(4UL,4UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for coefficents but in the forward
direction, due to changes to indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algbra to work out the
reversed mapping formula, but resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y) / ( c1 - c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: techniqually coefficient c5 is not needed, anymore,
but kept for completness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
First two coefficents are used to hole global polynomal information
c0 = Order of the polynimial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion.
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
register ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix = AcquireMagickMatrix(nterms,nterms);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
if (matrix == (double **) NULL ||
vectors == (double **) NULL ||
terms == (double *) NULL )
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bootom edge to this radius (radial scaling)
By default, if the radii arguments are nor provided the image radius
is calculated so the horizontal center-line is fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 is the sanatized version first 6 input args
Coefficient 6 is the angle to coord ratio and visa-versa
Coefficient 7 is the radius to coord ratio and visa-versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usally 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius 0 or negative, its a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* converstion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a clinder and flat plain from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is calculated radius of cylinder.
Coeff 2,3 center of distortion of input image
Coefficents 4,5 Center of Distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
Returns 10 coefficent values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
/* X,Y Center of Distortion (image coodinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
/* center of the image provided (image coodinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
case ShepardsDistortion:
{
/* Shepards Distortion input arguments are the coefficents!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resize image using the equivalent but slower image
% distortion operator. The filter is applied using a EWA cylindrical
% resampling. But like resize the final image size is limited to whole pixels
% with no effects by virtual-pixels on the result.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without a transparency channel.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image: resize via an affine EWA distortion mapping the
    source canvas (columns x rows) onto the requested size, then crop the
    over-sized distorted result back to exactly columns x rows.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  (void) memset(distort_args,0,12*sizeof(double));
  /* Affine CP pairs: (0,0)->(0,0) and scale each axis to the new size */
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tmp_image == (Image *) NULL)
    return((Image *) NULL);
  /* transparent virtual pixels so edges are not contaminated by VP color */
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod);
  if (image->matte == MagickFalse)
    {
      /*
        Image has no transparency channel, so we are free to use one for
        handling the virtual-pixel edge effects.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      InheritException(exception,&image->exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate the Virtual-Pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.
      */
      Image
        *resize_alpha;

      /* distort alpha channel separately */
      (void) SeparateImageChannel(tmp_image,TrueAlphaChannel);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if (tmp_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resize image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      (void) SetImageAlphaChannel(resize_alpha,DeactivateAlphaChannel);
      (void) CompositeImage(resize_image,CopyOpacityCompositeOp,resize_alpha,
        0,0);
      InheritException(exception,&resize_image->exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save);
  /*
    Clean up the results of the Distortion: crop away the 'bestfit'
    over-size and reset page geometry so the result is a plain image.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      resize_image->matte=image->matte;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
% distortion when more than the minimum number of control point pairs
% are provided.
%
% Perspective, and Bilinear, fall back to a Affine distortion when less
% than 4 control point pairs are provided. While Affine distortions
% let you use any number of control point pairs, that is Zero pairs is
% a No-Op (viewport only) distortion, one pair is a translation and
% two pairs of control points do a scale-rotate-translate, without any
% shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
% Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
% Directly set the output image canvas area and offset to use for the
% resulting image, rather than use the original images canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
% Other settings that can effect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image,DistortImageMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Handle Special Compound Distortions
*/
if (method == ResizeDistortion)
{
if (number_arguments != 2)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t) arguments[0],
(size_t) arguments[1],exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff=GenerateCoefficients(image,&method,number_arguments,arguments,0,
exception);
if (coeff == (double *) NULL)
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usally the four corners of the source image is enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the file image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* The user provided a 'viewport' expert option which may
overrides some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidGeometry","`%s' `%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
/* Verbose output */
if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
register ssize_t
i;
char image_gen[MaxTextExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MaxTextExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method)
{
case AffineDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s","DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr,
" -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0],coeff[1],coeff[2]);
(void) FormatLocaleFile(stderr," yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3],coeff[4],coeff[5]);
(void) FormatLocaleFile(stderr," %s' \\\n",lookup);
break;
}
case PerspectiveDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
"DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr,"Perspective Projection:\n");
(void) FormatLocaleFile(stderr,
" -distort PerspectiveProjection \\\n '");
for (i=0; i < 4; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for ( ; i < 7; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "%.*g'\n",GetMagickPrecision(),
inverse[7]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr,"Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%.1024s",image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," rr=%+.*g*ii %+.*g*jj + 1;\n",
GetMagickPrecision(),coeff[6],GetMagickPrecision(),coeff[7]);
(void) FormatLocaleFile(stderr,
" xx=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[0],GetMagickPrecision(),coeff[1],
GetMagickPrecision(),coeff[2]);
(void) FormatLocaleFile(stderr,
" yy=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[3],GetMagickPrecision(),coeff[4],
GetMagickPrecision(),coeff[5]);
(void) FormatLocaleFile(stderr," rr%s0 ? %s : blue' \\\n",
coeff[8] < 0.0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
{
(void) FormatLocaleFile(stderr,"BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4],coeff[5],coeff[6],coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",0.5-coeff[3],0.5-
coeff[7]);
(void) FormatLocaleFile(stderr," bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if (coeff[9] != 0)
{
(void) FormatLocaleFile(stderr,
" rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",-2*coeff[9],coeff[4],
-coeff[0]);
(void) FormatLocaleFile(stderr,
" yy=( -bb + sqrt(rt) ) / %lf;\n",coeff[9]);
}
else
(void) FormatLocaleFile(stderr," yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4],coeff[0]);
(void) FormatLocaleFile(stderr,
" xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",-coeff[1],coeff[0],
coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr," (rt < 0 ) ? red : %s'\n",
lookup);
else
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BilinearReverseDistortion:
{
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr,
" xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[0],coeff[1],
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr,
" yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[4],coeff[5],
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr,
"Polynomial (order %lg, terms %lu), FX Equivelent\n",coeff[0],
(unsigned long) nterms);
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n yy =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr,"\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr,"Arc Distort, Internal Coefficients:\n");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr,
" c%.20g = %+lf\n",(double) i,coeff[i]);
(void) FormatLocaleFile(stderr,"Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr," xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*%lf %+lf;\n",coeff[1],
coeff[4]);
(void) FormatLocaleFile(stderr,
" yy=(%lf - hypot(ii,jj)) * %lf;\n",coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr,"Polar Distort, Internal Coefficents\n");
for (i=0; i < 8; i++)
(void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
coeff[i]);
(void) FormatLocaleFile(stderr,"Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",-coeff[2],-coeff[3]);
(void) FormatLocaleFile(stderr," xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr," yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1],coeff[7] );
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr,
"DePolar Distort, Internal Coefficents\n");
for (i=0; i < 8; i++)
(void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
coeff[i]);
(void) FormatLocaleFile(stderr,"DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'aa=(i+.5)*%lf %+lf;\n",
coeff[6],+coeff[4]);
(void) FormatLocaleFile(stderr," rr=(j+.5)*%lf %+lf;\n",
coeff[7],+coeff[1]);
(void) FormatLocaleFile(stderr," xx=rr*sin(aa) %+lf;\n",
coeff[2]);
(void) FormatLocaleFile(stderr," yy=rr*cos(aa) %+lf;\n",
coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr,
"Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
(void) FormatLocaleFile(stderr,
"Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," aa=atan(ii/%+lf);\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*aa%+lf;\n",
coeff[1],coeff[2]);
(void) FormatLocaleFile(stderr," yy=jj*cos(aa)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr,
"Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
(void) FormatLocaleFile(stderr,
"Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," ii=ii/%+lf;\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*tan(ii)%+lf;\n",coeff[1],
coeff[2] );
(void) FormatLocaleFile(stderr," yy=jj/cos(ii)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
double
xc,
yc;
/*
NOTE: This does the barrel roll in pixel coords not image coords
The internal distortion must do it in image coordinates, so that is
what the center coeff (8,9) is given in.
*/
xc=((double)image->columns-1.0)/2.0+image->page.x;
yc=((double)image->rows-1.0)/2.0+image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr," -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr," -fx 'xc=%lf; yc=%lf;\n",coeff[8]-
0.5,coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr,
" ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[0],coeff[1],coeff[2],
coeff[3]);
(void) FormatLocaleFile(stderr,
" jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[4],coeff[5],coeff[6],
coeff[7]);
(void) FormatLocaleFile(stderr," v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
/*
The user provided a 'scale' expert option will scale the output image size,
by the factor given allowing for super-sampling of the distorted image
space. Any scaling factors must naturally be halved as a result.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s","-define distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
return((Image *) NULL);
}
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass) == MagickFalse)
{
coeff=(double *) RelinquishMagickMemory(coeff);
InheritException(exception,&distort_image->exception);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace);
if (distort_image->background_color.opacity != OpaqueOpacity)
distort_image->matte=MagickTrue;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ResampleFilter
**magick_restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetMagickPixelPacket(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid is this the mapping */
MagickBooleanType
sync;
MagickPixelPacket
pixel, /* pixel color to assign to distorted image */
invalid; /* the color to assign when distort result is invalid */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register IndexPacket
*magick_restrict indexes;
register ssize_t
i;
register PixelPacket
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(distort_view);
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
GetMagickPixelPacket(distort_image,&invalid);
SetMagickPixelPacket(distort_image,&distort_image->matte_color,
(IndexPacket *) NULL, &invalid);
if (distort_image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&invalid); /* what about other color spaces? */
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivitives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivitives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunatally requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-alising */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Deritives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
The results is a very simple orthogonal aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesain to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* @D Polar to Carteasain */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are usless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personnal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Cylinder to Tangential Plane */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of a infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) ((coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5);
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 1
/*if ( i == 0 && j == 0 ) {*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distionion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepards Method, or Inverse Weighted Distance for
displacement around the destination image control points
The input arguments are the coefficents to the function.
This is more of a 'displacement' function rather than an
absolute distortion function.
Note: We can not determine derivatives using shepards method
so only a point sample interpolatation can be used.
*/
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x; /* make it as relative displacement */
s.y += d.y;
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelPacket(distort_image,&invalid,q,indexes);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
MagickPixelCompositeBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelPacket(distort_image,&pixel,q,indexes);
}
q++;
indexes++;
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,DistortImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
And the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff=(double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one.  Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise.  Rotated images are usually larger than
% the originals and have 'empty' triangular corners.  The empty triangles
% left over from rotating the image are filled with the background color
% defined by member 'background_color' of the image.  RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *rotated_image,
    *source_clone;

  MagickRealType
    residual_angle;

  PointInfo
    shear;

  size_t
    quadrants;

  /*
    Validate arguments, then reduce the requested rotation to a number of
    90-degree quadrant turns plus a residual angle in the range (-45,45].
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  residual_angle=fmod(degrees,360.0);
  while (residual_angle < -45.0)
    residual_angle+=360.0;
  for (quadrants=0; residual_angle > 45.0; quadrants++)
    residual_angle-=90.0;
  quadrants%=4;
  /*
    The residual angle determines the shear factors; when both vanish the
    rotation is an exact multiple of 90 degrees and needs no resampling.
  */
  shear.x=(-tan((double) DegreesToRadians(residual_angle)/2.0));
  shear.y=sin((double) DegreesToRadians(residual_angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,quadrants,exception));
  /*
    General rotation: clone the source, rotate it with the SRT distortion
    (best-fit canvas, original angle), then release the working clone.
  */
  source_clone=CloneImage(image,0,0,MagickTrue,exception);
  if (source_clone == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(source_clone,BackgroundVirtualPixelMethod);
  rotated_image=DistortImage(source_clone,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  source_clone=DestroyImage(source_clone);
  return(rotated_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,const ChannelType channel,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o channel: Specify which color values (in RGBKA sequence) are being set.
% This also determines the number of color_values in above.
%
% o method: the method to fill in the gradient between the control points.
%
%      The methods used for SparseColor() are often similar to methods
%      used for DistortImage(), and even share the same code for determination
%      of the function coefficients, though with more dimensions (or resulting
%      values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
  const ChannelType channel,const SparseColorMethod method,
  const size_t number_arguments,const double *arguments,
  ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"
  SparseColorMethod
    sparse_method;  /* method actually used, after any fallback below */
  double
    *coeff;  /* per-channel mapping coefficients from the control points */
  Image
    *sparse_image;
  size_t
    number_colors;  /* number of color values stored per control point */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Determine number of color values needed per control point */
  number_colors=0;
  if ( channel & RedChannel ) number_colors++;
  if ( channel & GreenChannel ) number_colors++;
  if ( channel & BlueChannel ) number_colors++;
  if ( channel & IndexChannel ) number_colors++;
  if ( channel & OpacityChannel ) number_colors++;
  /*
    Convert input arguments into mapping coefficients; in this case
    we are mapping (distorting) colors, rather than coordinates.
  */
  { DistortImageMethod
      distort_method;
    distort_method=(DistortImageMethod) method;
    if ( distort_method >= SentinelDistortion )
      distort_method = ShepardsDistortion; /* Pretend to be Shepards */
    coeff = GenerateCoefficients(image, &distort_method, number_arguments,
      arguments, number_colors, exception);
    if ( coeff == (double *) NULL )
      return((Image *) NULL);
    /*
      Note some Distort Methods may fall back to other simpler methods.
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse_color method. This also ensures
      correct two and one color Barycentric handling.
    */
    sparse_method = (SparseColorMethod) distort_method;
    if ( distort_method == ShepardsDistortion )
      sparse_method = method; /* return non-distort methods to normal */
    if ( sparse_method == InverseColorInterpolate )
      coeff[0]=0.5; /* sqrt() the squared distance for inverse */
  }
  /* Verbose output: print the equivalent "-fx" expression when possible */
  if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
    switch (sparse_method) {
      case BarycentricColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
        if ( channel & RedChannel )
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & GreenChannel )
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & BlueChannel )
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & IndexChannel )
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & OpacityChannel )
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        break;
      }
      case BilinearColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
        if ( channel & RedChannel )
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & GreenChannel )
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & BlueChannel )
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & IndexChannel )
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & OpacityChannel )
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        break;
      }
      default:
        /* sparse color method is too complex for FX emulation */
        break;
    }
  }
  /* Generate new image for generated interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy, is if storage class could change then clone
   * the image.
   */
  sparse_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sparse_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sparse_image,DirectClass) == MagickFalse)
    { /* if image is ColorMapped - change it to DirectClass */
      /* the failure was recorded on the clone, not the source image */
      InheritException(exception,&sparse_image->exception);
      sparse_image=DestroyImage(sparse_image);
      return((Image *) NULL);
    }
  { /* ----- MAIN CODE ----- */
    CacheView
      *sparse_view;
    MagickBooleanType
      status;
    MagickOffsetType
      progress;
    ssize_t
      j;
    status=MagickTrue;
    progress=0;
    sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sparse_image,sparse_image->rows,1)
#endif
    for (j=0; j < (ssize_t) sparse_image->rows; j++)
    {
      MagickBooleanType
        sync;
      MagickPixelPacket
        pixel; /* pixel to assign to distorted image */
      register IndexPacket
        *magick_restrict indexes;
      register ssize_t
        i;
      register PixelPacket
        *magick_restrict q;
      q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
        1,exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewAuthenticIndexQueue(sparse_view);
      GetMagickPixelPacket(sparse_image,&pixel);
      for (i=0; i < (ssize_t) image->columns; i++)
      {
        SetMagickPixelPacket(image,q,indexes,&pixel);
        switch (sparse_method)
        {
          case BarycentricColorInterpolate:
          {
            /* Affine gradient: value = A*i + B*j + C, per channel */
            register ssize_t x=0;
            if ( channel & RedChannel )
              pixel.red     = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if ( channel & GreenChannel )
              pixel.green   = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if ( channel & BlueChannel )
              pixel.blue    = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if ( channel & IndexChannel )
              pixel.index   = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if ( channel & OpacityChannel )
              pixel.opacity = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            break;
          }
          case BilinearColorInterpolate:
          {
            /* As Barycentric, with an additional cross (i*j) term */
            register ssize_t x=0;
            if ( channel & RedChannel )
              pixel.red     = coeff[x]*i + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & GreenChannel )
              pixel.green   = coeff[x]*i + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & BlueChannel )
              pixel.blue    = coeff[x]*i + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & IndexChannel )
              pixel.index   = coeff[x]*i + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & OpacityChannel )
              pixel.opacity = coeff[x]*i + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            break;
          }
          case InverseColorInterpolate:
          case ShepardsColorInterpolate:
          { /* Inverse (Squared) Distance weights average (IDW) */
            size_t
              k;
            double
              denominator;
            if ( channel & RedChannel ) pixel.red = 0.0;
            if ( channel & GreenChannel ) pixel.green = 0.0;
            if ( channel & BlueChannel ) pixel.blue = 0.0;
            if ( channel & IndexChannel ) pixel.index = 0.0;
            if ( channel & OpacityChannel ) pixel.opacity = 0.0;
            denominator = 0.0;
            /* arguments are packed as x,y,color_values per control point */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              register ssize_t x=(ssize_t) k+2;
              double weight =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              weight = pow(weight,coeff[0]); /* inverse of power factor */
              weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
              if ( channel & RedChannel )
                pixel.red     += arguments[x++]*weight;
              if ( channel & GreenChannel )
                pixel.green   += arguments[x++]*weight;
              if ( channel & BlueChannel )
                pixel.blue    += arguments[x++]*weight;
              if ( channel & IndexChannel )
                pixel.index   += arguments[x++]*weight;
              if ( channel & OpacityChannel )
                pixel.opacity += arguments[x++]*weight;
              denominator += weight;
            }
            if ( channel & RedChannel ) pixel.red /= denominator;
            if ( channel & GreenChannel ) pixel.green /= denominator;
            if ( channel & BlueChannel ) pixel.blue /= denominator;
            if ( channel & IndexChannel ) pixel.index /= denominator;
            if ( channel & OpacityChannel ) pixel.opacity /= denominator;
            break;
          }
          case ManhattanColorInterpolate:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;
            /*
              Just use the closest control point you can find!
              (city-block distance metric)
            */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  fabs((double)i-arguments[ k ])
                + fabs((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ( channel & RedChannel ) pixel.red = arguments[x++];
                if ( channel & GreenChannel ) pixel.green = arguments[x++];
                if ( channel & BlueChannel ) pixel.blue = arguments[x++];
                if ( channel & IndexChannel ) pixel.index = arguments[x++];
                if ( channel & OpacityChannel ) pixel.opacity = arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
          case VoronoiColorInterpolate:
          default:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;
            /*
              Just use the closest control point you can find!
              (squared Euclidean distance metric)
            */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ( channel & RedChannel ) pixel.red = arguments[x++];
                if ( channel & GreenChannel ) pixel.green = arguments[x++];
                if ( channel & BlueChannel ) pixel.blue = arguments[x++];
                if ( channel & IndexChannel ) pixel.index = arguments[x++];
                if ( channel & OpacityChannel ) pixel.opacity = arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
        }
        /* set the color directly back into the source image */
        if ( channel & RedChannel )
          pixel.red=ClampPixel(QuantumRange*pixel.red);
        if ( channel & GreenChannel )
          pixel.green=ClampPixel(QuantumRange*pixel.green);
        if ( channel & BlueChannel )
          pixel.blue=ClampPixel(QuantumRange*pixel.blue);
        if ( channel & IndexChannel )
          pixel.index=ClampPixel(QuantumRange*pixel.index);
        if ( channel & OpacityChannel )
          pixel.opacity=ClampPixel(QuantumRange*pixel.opacity);
        SetPixelPacket(sparse_image,&pixel,q,indexes);
        q++;
        indexes++;
      }
      sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp atomic
#endif
          progress++;
          proceed=SetImageProgress(image,SparseColorTag,progress,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    sparse_view=DestroyCacheView(sparse_view);
    if (status == MagickFalse)
      sparse_image=DestroyImage(sparse_image);
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(sparse_image);
}
|
pr26943-4.c | /* PR c++/26943 */
/* { dg-do run } */
extern int omp_set_dynamic (int);
extern void omp_set_nested (int);
extern int omp_get_thread_num (void);
extern void abort (void);
extern void GOMP_barrier (void);
int a = 8, b = 12, c = 16, d = 20, j = 0, l = 0;
char e[10] = "a", f[10] = "b", g[10] = "c", h[10] = "d";
volatile int k;
int
main (void)
{
  /*
    OpenMP data-sharing regression test (PR c++/26943): verifies that
    shared/firstprivate/lastprivate/private/reduction clauses give the
    expected copy-in/copy-out behavior across a nested parallel region.
    Any mismatch sets bits in j (inner) or l (outer); a nonzero result
    aborts.
  */
  int i;
  omp_set_dynamic (0);  /* require the exact thread counts requested below */
  omp_set_nested (1);   /* allow the nested inner parallel region to fork */
#pragma omp parallel num_threads (2) reduction (+:l) \
                     firstprivate (a, b, c, d, e, f, g, h, j)
  if (k == omp_get_thread_num ())  /* k is never written, so only thread 0 runs the body */
    {
#pragma omp parallel for shared (a, e) firstprivate (b, f) \
                         lastprivate (c, g) private (d, h) \
                         schedule (static, 1) num_threads (4) \
                         reduction (+:j)
      for (i = 0; i < 4; i++)
        {
          /* firstprivate copies must start with the initial values */
          if (a != 8 || b != 12 || e[0] != 'a' || f[0] != 'b')
            j++;
          GOMP_barrier ();
#pragma omp atomic
          a += i;   /* shared: updates must accumulate across threads */
          b += i;   /* firstprivate: each thread mutates its own copy */
          c = i;    /* lastprivate: value from the last iteration wins */
          d = i;    /* private: never copied back out */
#pragma omp atomic
          e[0] += i;
          f[0] += i;
          g[0] = 'g' + i;
          h[0] = 'h' + i;
          GOMP_barrier ();
          /* shared a/e saw all four increments (0+1+2+3 == 6);
             the private-family copies saw only this thread's write */
          if (a != 8 + 6 || b != 12 + i || c != i || d != i)
            j += 8;
          if (e[0] != 'a' + 6 || f[0] != 'b' + i || g[0] != 'g' + i)
            j += 64;
          if (h[0] != 'h' + i)
            j += 512;
        }
      /* after the worksharing loop: lastprivate c/g copied out (i == 3),
         private d/h restored to the outer firstprivate values */
      if (j || a != 8 + 6 || b != 12 || c != 3 || d != 20)
        ++l;
      if (e[0] != 'a' + 6 || f[0] != 'b' || g[0] != 'g' + 3 || h[0] != 'd')
        l += 8;
    }
  if (l)
    abort ();
  /* the outer region was firstprivate, so the globals are untouched */
  if (a != 8 || b != 12 || c != 16 || d != 20)
    abort ();
  if (e[0] != 'a' || f[0] != 'b' || g[0] != 'c' || h[0] != 'd')
    abort ();
  return 0;
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% John Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/fx-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/shear.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Define declarations.
*/
#define LeftShiftOperator 0xf5
#define RightShiftOperator 0xf6
#define LessThanEqualOperator 0xf7
#define GreaterThanEqualOperator 0xf8
#define EqualOperator 0xf9
#define NotEqualOperator 0xfa
#define LogicalAndOperator 0xfb
#define LogicalOrOperator 0xfc
#define ExponentialNotation 0xfd
/* Per-evaluation state for an "-fx" expression. */
struct _FxInfo
{
  const Image
    *images;        /* image (list) the expression is evaluated against */
  char
    *expression;    /* pre-processed expression text (see AcquireFxInfo) */
  FILE
    *file;          /* output stream for diagnostics (set to stderr) */
  SplayTreeInfo
    *colors,        /* string-keyed cache trees; presumably memoize parsed
                       colors and symbol values -- confirm against users */
    *symbols;
  CacheView
    **view;         /* one cache view per image in the list */
  RandomInfo
    *random_info;   /* PRNG state for random-valued expression terms */
  ExceptionInfo
    *exception;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *image,const char *expression)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: the expression.
%
*/
MagickExport FxInfo *AcquireFxInfo(const Image *image,const char *expression)
{
  char
    fx_op[2];
  const Image
    *next;
  FxInfo
    *fx_info;
  register ssize_t
    i;
  /*
    Two-character operators are folded into single-byte opcodes; the
    substitution order below matches the operator precedence the parser
    expects ("<<" before "<=", and "**" last, after the scientific-notation
    rewrite has introduced its own "**" sequences).
  */
  static const char
    *compound_ops[] =
    {
      "<<", ">>", "<=", ">=", "==", "!=", "&&", "||", "**"
    };
  static const unsigned char
    op_codes[] =
    {
      LeftShiftOperator, RightShiftOperator, LessThanEqualOperator,
      GreaterThanEqualOperator, EqualOperator, NotEqualOperator,
      LogicalAndOperator, LogicalOrOperator, ExponentialNotation
    };
  /*
    Allocate and zero the FxInfo structure.
  */
  fx_info=(FxInfo *) AcquireMagickMemory(sizeof(*fx_info));
  if (fx_info == (FxInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=image;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    Acquire one cache view for every image in the list.
  */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  for (next=GetFirstImageInList(fx_info->images); next != (Image *) NULL;
       next=next->next)
    fx_info->view[i++]=AcquireCacheView(next);
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ",""); /* compact string */
  if ((strstr(fx_info->expression,"e+") != (char *) NULL) ||
      (strstr(fx_info->expression,"e-") != (char *) NULL))
    {
      char
        pattern[4],
        replacement[8];
      int
        digit;
      /*
        Convert scientific notation: "<digit>e+" becomes "<digit>**10^" and
        "<digit>e-" becomes "<digit>**10^-", for each decimal digit.
      */
      for (digit='0'; digit <= '9'; digit++)
      {
        pattern[0]=(char) digit;
        pattern[1]='e';
        pattern[2]='+';
        pattern[3]='\0';
        replacement[0]=(char) digit;
        replacement[1]='*';
        replacement[2]='*';
        replacement[3]='1';
        replacement[4]='0';
        replacement[5]='^';
        replacement[6]='\0';
        (void) SubstituteString(&fx_info->expression,pattern,replacement);
        pattern[2]='-';
        replacement[6]='-';
        replacement[7]='\0';
        (void) SubstituteString(&fx_info->expression,pattern,replacement);
      }
    }
  /*
    Force right-to-left associativity for unary negation.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  /*
    Convert complex to simple operators.
  */
  fx_op[1]='\0';
  for (i=0; i < (ssize_t) (sizeof(compound_ops)/sizeof(*compound_ops)); i++)
  {
    *fx_op=(char) op_codes[i];
    (void) SubstituteString(&fx_info->expression,compound_ops[i],fx_op);
  }
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% ExceptionInfo *exception)
% Image *AddNoiseImageChannel(const Image *image,const ChannelType channel,
% const NoiseType noise_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: add noise of the given type to the default
    channel set.
  */
  return(AddNoiseImageChannel(image,DefaultChannels,noise_type,exception));
}
MagickExport Image *AddNoiseImageChannel(const Image *image,
  const ChannelType channel,const NoiseType noise_type,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"
  CacheView
    *image_view,
    *noise_view;
  const char
    *option;
  Image
    *noise_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickRealType
    attenuate;  /* noise amplitude scale, from the "attenuate" artifact */
  RandomInfo
    **restrict random_info;  /* one PRNG per OpenMP thread */
  ssize_t
    y;
  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&noise_image->exception);
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.
  */
  attenuate=1.0;
  option=GetImageArtifact(image,"attenuate");
  if (option != (char *) NULL)
    attenuate=StringToDouble(option);
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireCacheView(image);
  noise_view=AcquireCacheView(noise_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's private PRNG */
    MagickBooleanType
      sync;
    register const IndexPacket
      *restrict indexes;
    register const PixelPacket
      *restrict p;
    register IndexPacket
      *restrict noise_indexes;
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;  /* a previous row failed; skip remaining iterations */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Perturb only the channels the caller selected. */
      if ((channel & RedChannel) != 0)
        q->red=ClampToQuantum(GenerateDifferentialNoise(random_info[id],
          p->red,noise_type,attenuate));
      if ((channel & GreenChannel) != 0)
        q->green=ClampToQuantum(GenerateDifferentialNoise(random_info[id],
          p->green,noise_type,attenuate));
      if ((channel & BlueChannel) != 0)
        q->blue=ClampToQuantum(GenerateDifferentialNoise(random_info[id],
          p->blue,noise_type,attenuate));
      if ((channel & OpacityChannel) != 0)
        q->opacity=ClampToQuantum(GenerateDifferentialNoise(random_info[id],
          p->opacity,noise_type,attenuate));
      /* index channel (black) is only meaningful for CMYK images */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        noise_indexes[x]=(IndexPacket) ClampToQuantum(GenerateDifferentialNoise(
          random_info[id],indexes[x],noise_type,attenuate));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_AddNoiseImage)
#endif
        proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"
  CacheView
    *image_view,
    *shift_view;
  Image
    *shift_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shift_image->exception);
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  shift_view=AcquireCacheView(shift_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;
    MagickPixelPacket
      pixel;
    Quantum
      quantum;
    register const PixelPacket
      *restrict p;
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;  /* a previous row failed; skip remaining iterations */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* First pass: blend each channel toward the minimum RGB component. */
      quantum=GetRedPixelComponent(p);
      if (p->green < quantum)
        quantum=GetGreenPixelComponent(p);
      if (p->blue < quantum)
        quantum=GetBluePixelComponent(p);
      pixel.red=0.5*(p->red+factor*quantum);
      pixel.green=0.5*(p->green+factor*quantum);
      pixel.blue=0.5*(p->blue+factor*quantum);
      /* Second pass: blend the result toward the maximum RGB component. */
      quantum=GetRedPixelComponent(p);
      if (p->green > quantum)
        quantum=GetGreenPixelComponent(p);
      if (p->blue > quantum)
        quantum=GetBluePixelComponent(p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetRedPixelComponent(q,ClampRedPixelComponent(&pixel));
      SetGreenPixelComponent(q,ClampGreenPixelComponent(&pixel));
      SetBluePixelComponent(q,ClampBluePixelComponent(&pixel));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_BlueShiftImage)
#endif
        proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *blur_image,
    *edge_image,
    *gray_image;
  /*
    Simulate a charcoal drawing: edge-detect a grayscale copy of the
    image, blur the result, then normalize and negate so the strokes
    read as dark marks on a light background.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  gray_image=CloneImage(image,0,0,MagickTrue,exception);
  if (gray_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageType(gray_image,GrayscaleType);
  edge_image=EdgeImage(gray_image,radius,exception);
  gray_image=DestroyImage(gray_image);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  blur_image=BlurImage(edge_image,radius,sigma,exception);
  edge_image=DestroyImage(edge_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(blur_image);
  (void) NegateImage(blur_image,MagickFalse);
  (void) SetImageType(blur_image,GrayscaleType);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *opacity,
% const PixelPacket colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: A character string indicating the level of opacity as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *opacity,
  const PixelPacket colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
  CacheView
    *colorize_view,
    *image_view;
  GeometryInfo
    geometry_info;
  Image
    *colorize_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickPixelPacket
    pixel;  /* per-channel blend percentages parsed from opacity */
  MagickStatusType
    flags;
  ssize_t
    y;
  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  colorize_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&colorize_image->exception);
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /* no opacity string: return the (unblended) clone as-is */
  if (opacity == (const char *) NULL)
    return(colorize_image);
  /*
    Determine RGB values of the pen color.
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;      /* first value applies to all channels */
  pixel.green=geometry_info.rho;
  pixel.blue=geometry_info.rho;
  pixel.opacity=(MagickRealType) OpaqueOpacity;
  if ((flags & SigmaValue) != 0)    /* later values override per channel */
    pixel.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  colorize_view=AcquireCacheView(colorize_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;
    register const PixelPacket
      *restrict p;
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;  /* a previous row failed; skip remaining iterations */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(colorize_view,0,y,colorize_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Weighted average of source and fill, per channel (pixel.* is %) */
      q->red=(Quantum) ((p->red*(100.0-pixel.red)+
        colorize.red*pixel.red)/100.0);
      q->green=(Quantum) ((p->green*(100.0-pixel.green)+
        colorize.green*pixel.green)/100.0);
      q->blue=(Quantum) ((p->blue*(100.0-pixel.blue)+
        colorize.blue*pixel.blue)/100.0);
      q->opacity=(Quantum) ((p->opacity*(100.0-pixel.opacity)+
        colorize.opacity*pixel.opacity)/100.0);
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(colorize_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ColorizeImage)
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  colorize_view=DestroyCacheView(colorize_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorMatrixImage(const Image *image,
const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"
CacheView
*color_view,
*image_view;
double
ColorMatrix[6][6] =
{
{ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
{ 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
{ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
};
Image
*color_image;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
u,
v,
y;
/*
Create color matrix.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* copy the user kernel over the 6x6 identity; entries beyond 6x6 are read
past but discarded so variable-sized kernels remain accepted */
i=0;
for (v=0; v < (ssize_t) color_matrix->height; v++)
for (u=0; u < (ssize_t) color_matrix->width; u++)
{
if ((v < 6) && (u < 6))
ColorMatrix[v][u]=color_matrix->values[i];
i++;
}
/*
Initialize color image.
*/
color_image=CloneImage(image,0,0,MagickTrue,exception);
if (color_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(color_image,DirectClass) == MagickFalse)
{
InheritException(exception,&color_image->exception);
color_image=DestroyImage(color_image);
return((Image *) NULL);
}
if (image->debug != MagickFalse)
{
char
format[MaxTextExtent],
*message;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  ColorMatrix image with color matrix:");
message=AcquireString("");
for (v=0; v < 6; v++)
{
*message='\0';
(void) FormatMagickString(format,MaxTextExtent,"%.20g: ",(double) v);
(void) ConcatenateString(&message,format);
for (u=0; u < 6; u++)
{
(void) FormatMagickString(format,MaxTextExtent,"%+f ",
ColorMatrix[v][u]);
(void) ConcatenateString(&message,format);
}
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
}
message=DestroyString(message);
}
/*
ColorMatrix image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
color_view=AcquireCacheView(color_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickRealType
pixel;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
register IndexPacket
*restrict color_indexes;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
/* Get (not Queue) so channels the matrix leaves untouched keep their
cloned values */
q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
color_indexes=GetCacheViewAuthenticIndexQueue(color_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
v;
size_t
height;
height=color_matrix->height > 6 ? 6UL : color_matrix->height;
for (v=0; v < (ssize_t) height; v++)
{
/* row v produces output channel v: weighted sum of R,G,B, then
alpha, CMYK index, and the offset column scaled by QuantumRange */
pixel=ColorMatrix[v][0]*p->red+ColorMatrix[v][1]*p->green+
ColorMatrix[v][2]*p->blue;
if (image->matte != MagickFalse)
pixel+=ColorMatrix[v][3]*(QuantumRange-p->opacity);
if (image->colorspace == CMYKColorspace)
pixel+=ColorMatrix[v][4]*indexes[x];
pixel+=QuantumRange*ColorMatrix[v][5];
switch (v)
{
case 0: q->red=ClampToQuantum(pixel); break;
case 1: q->green=ClampToQuantum(pixel); break;
case 2: q->blue=ClampToQuantum(pixel); break;
case 3:
{
if (image->matte != MagickFalse)
q->opacity=ClampToQuantum(QuantumRange-pixel);
break;
}
case 4:
{
if (image->colorspace == CMYKColorspace)
color_indexes[x]=ClampToQuantum(pixel);
break;
}
}
}
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ColorMatrixImage)
#endif
proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
color_view=DestroyCacheView(color_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
color_image=DestroyImage(color_image);
return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% FxInfo *DestroyFxInfo(FxInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickExport FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
register ssize_t
i;
/* release members in reverse order of acquisition, then the struct itself */
fx_info->exception=DestroyExceptionInfo(fx_info->exception);
fx_info->expression=DestroyString(fx_info->expression);
fx_info->symbols=DestroySplayTree(fx_info->symbols);
fx_info->colors=DestroySplayTree(fx_info->colors);
/* one cache view was acquired per image in the list; release them all */
for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--)
fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
/* RelinquishMagickMemory() returns NULL, so the caller receives a cleared
pointer suitable for reassignment */
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
%      MagickRealType FxEvaluateChannelExpression(FxInfo *fx_info,
%        const ChannelType channel,const ssize_t x,const ssize_t y,
%        MagickRealType *alpha,ExceptionInfo *exception)
%      MagickRealType FxEvaluateExpression(FxInfo *fx_info,
%        MagickRealType *alpha,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickMax(const double x,const double y)
{
  /* Return the larger of two doubles (y when the operands compare equal
     or unordered, matching the original if/return form). */
  return(x > y ? x : y);
}
static inline double MagickMin(const double x,const double y)
{
  /* Return the smaller of two doubles (y when the operands compare equal
     or unordered, matching the original if/return form). */
  return(x < y ? x : y);
}
static MagickRealType FxChannelStatistics(FxInfo *fx_info,const Image *image,
  ChannelType channel,const char *symbol,ExceptionInfo *exception)
{
  /*
    Return an image statistic (depth, kurtosis, maxima, mean, minima,
    skewness, or standard_deviation) for the given channel, scaled by
    QuantumScale.  Results are memoized in fx_info->symbols, keyed by image
    address, channel, and symbol.
  */
  char
    key[MaxTextExtent],
    statistic[MaxTextExtent];

  const char
    *value;

  register const char
    *p;

  /* an optional suffix such as ".r" or ".k" overrides the channel */
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    switch (*++p)  /* e.g. depth.r */
    {
      case 'r': channel=RedChannel; break;
      case 'g': channel=GreenChannel; break;
      case 'b': channel=BlueChannel; break;
      case 'c': channel=CyanChannel; break;
      case 'm': channel=MagentaChannel; break;
      case 'y': channel=YellowChannel; break;
      case 'k': channel=BlackChannel; break;
      default: break;
    }
  (void) FormatMagickString(key,MaxTextExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,key);
  if (value != (const char *) NULL)
    return(QuantumScale*StringToDouble(value));  /* cache hit */
  (void) DeleteNodeFromSplayTree(fx_info->symbols,key);
  /*
    Initialize statistic[] so that an unrecognized symbol caches an empty
    string (parsed as 0.0) instead of reading uninitialized stack memory in
    the AddValueToSplayTree()/StringToDouble() calls below.
  */
  *statistic='\0';
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageChannelDepth(image,channel,exception);
      (void) FormatMagickString(statistic,MaxTextExtent,"%.20g",(double)
        depth);
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
        exception);
      (void) FormatMagickString(statistic,MaxTextExtent,"%g",kurtosis);
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      (void) FormatMagickString(statistic,MaxTextExtent,"%g",maxima);
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      (void) FormatMagickString(statistic,MaxTextExtent,"%g",mean);
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      (void) FormatMagickString(statistic,MaxTextExtent,"%g",minima);
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
        exception);
      (void) FormatMagickString(statistic,MaxTextExtent,"%g",skewness);
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      (void) FormatMagickString(statistic,MaxTextExtent,"%g",
        standard_deviation);
    }
  /* memoize for subsequent lookups of the same image/channel/symbol */
  (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key),
    ConstantString(statistic));
  return(QuantumScale*StringToDouble(statistic));
}
static MagickRealType
FxEvaluateSubexpression(FxInfo *,const ChannelType,const ssize_t,
const ssize_t,const char *,MagickRealType *,ExceptionInfo *);
static inline MagickRealType FxMax(FxInfo *fx_info,const ChannelType channel,
  const ssize_t x,const ssize_t y,const char *expression,
  ExceptionInfo *exception)
{
  /*
    Evaluate a two-argument subexpression "a,b" (first result in alpha,
    second in beta) and return the larger of the two.
  */
  MagickRealType
    alpha,
    beta;

  alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression,&beta,exception);
  if (alpha > beta)
    return(alpha);
  return(beta);
}
static inline MagickRealType FxMin(FxInfo *fx_info,ChannelType channel,
  const ssize_t x,const ssize_t y,const char *expression,
  ExceptionInfo *exception)
{
  /*
    Evaluate a two-argument subexpression "a,b" (first result in alpha,
    second in beta) and return the smaller of the two.
  */
  MagickRealType
    alpha,
    beta;

  alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression,&beta,exception);
  if (alpha < beta)
    return(alpha);
  return(beta);
}
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  /*
    Scan past a parenthesized subexpression: starting at the opening '(',
    advance until the ')' that would close the outermost open parenthesis
    and return a pointer to it.  If the string ends first, report an
    UnbalancedParenthesis error and return a pointer to the terminator.
  */
  register const char
    *q;

  register ssize_t
    depth;

  depth=0;
  for (q=expression; *q != '\0'; q++)
  {
    if ((depth == 1) && (*q == ')'))
      break;  /* this ')' closes the outermost '(' */
    if (*q == '(')
      depth++;
    else
      if (*q == ')')
        depth--;
  }
  if (*q == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(q);
}
static MagickRealType FxGetSymbol(FxInfo *fx_info,const ChannelType channel,
const ssize_t x,const ssize_t y,const char *expression,
ExceptionInfo *exception)
{
/*
Resolve a symbol of the fx language at pixel (x,y): an optional image
specifier (s/u/v, possibly indexed as u[n]), an optional pixel reference
(p{x,y} absolute or p[dx,dy] relative), and a trailing symbol name such as
r, g, b, a, hue, mean, page.x, etc.  Returns the value, typically scaled
by QuantumScale.
*/
char
*q,
subexpression[MaxTextExtent],
symbol[MaxTextExtent];
const char
*p,
*value;
Image
*image;
InterpolatePixelMethod
interpolate_method;
MagickPixelPacket
pixel;
MagickRealType
alpha,
beta;
PointInfo
point;
register ssize_t
i;
size_t
length;
size_t
level;
p=expression;
/* default to the current image in the list and the current pixel */
i=GetImageIndexInList(fx_info->images);
level=0;
point.x=(double) x;
point.y=(double) y;
/* a single-letter prefix (next char non-alphabetic) may name an image:
s = current, u = first, v = second; u[expr] selects by computed index */
if (isalpha((int) *(p+1)) == 0)
{
if (strchr("suv",(int) *p) != (char *) NULL)
{
switch (*p)
{
case 's':
default:
{
i=GetImageIndexInList(fx_info->images);
break;
}
case 'u': i=0; break;
case 'v': i=1; break;
}
p++;
if (*p == '[')
{
/* copy the bracketed index expression, honoring nested brackets */
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
&beta,exception);
i=(ssize_t) (alpha+0.5);
p++;
}
if (*p == '.')
p++;
}
/* p{x,y} sets an absolute pixel position; p[dx,dy] offsets the current */
if ((isalpha((int) *(p+1)) == 0) && (*p == 'p'))
{
p++;
if (*p == '{')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '{')
level++;
else
if (*p == '}')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
&beta,exception);
point.x=alpha;
point.y=beta;
p++;
}
else
if (*p == '[')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
&beta,exception);
point.x+=alpha;
point.y+=beta;
p++;
}
if (*p == '.')
p++;
}
}
/* wrap the image index into [0,length) so negative/overflowing indices
select modulo the list length */
length=GetImageListLength(fx_info->images);
while (i < 0)
i+=(ssize_t) length;
i%=length;
image=GetImageFromList(fx_info->images,i);
if (image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"NoSuchImage","`%s'",expression);
return(0.0);
}
interpolate_method=image->interpolate == UndefinedInterpolatePixel ?
NearestNeighborInterpolatePixel : image->interpolate;
(void) InterpolateMagickPixelPacket(image,fx_info->view[i],interpolate_method,
point.x,point.y,&pixel,exception);
/* the remainder may be a color name (e.g. "red.g"); look it up once and
cache the parsed color in fx_info->colors */
if ((strlen(p) > 2) &&
(LocaleCompare(p,"intensity") != 0) &&
(LocaleCompare(p,"luminance") != 0) &&
(LocaleCompare(p,"hue") != 0) &&
(LocaleCompare(p,"saturation") != 0) &&
(LocaleCompare(p,"lightness") != 0))
{
char
name[MaxTextExtent];
(void) CopyMagickString(name,p,MaxTextExtent);
/* trim a trailing ".channel" suffix (but stop at a closing paren) */
for (q=name+(strlen(name)-1); q > name; q--)
{
if (*q == ')')
break;
if (*q == '.')
{
*q='\0';
break;
}
}
if ((strlen(name) > 2) &&
(GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
{
MagickPixelPacket
*color;
color=(MagickPixelPacket *) GetValueFromSplayTree(fx_info->colors,
name);
if (color != (MagickPixelPacket *) NULL)
{
pixel=(*color);
p+=strlen(name);
}
else
if (QueryMagickColor(name,&pixel,fx_info->exception) != MagickFalse)
{
(void) AddValueToSplayTree(fx_info->colors,ConstantString(name),
CloneMagickPixelPacket(&pixel));
p+=strlen(name);
}
}
}
(void) CopyMagickString(symbol,p,MaxTextExtent);
StripString(symbol);
/* bare pixel reference (no symbol): return the value for the requested
channel directly */
if (*symbol == '\0')
{
switch (channel)
{
case RedChannel: return(QuantumScale*pixel.red);
case GreenChannel: return(QuantumScale*pixel.green);
case BlueChannel: return(QuantumScale*pixel.blue);
case OpacityChannel:
{
MagickRealType
alpha;
if (pixel.matte == MagickFalse)
return(1.0);
alpha=(MagickRealType) (QuantumScale*GetAlphaPixelComponent(&pixel));
return(alpha);
}
case IndexChannel:
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.index);
}
case DefaultChannels:
{
return(QuantumScale*MagickPixelIntensityToQuantum(&pixel));
}
default:
break;
}
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",p);
return(0.0);
}
/* named symbols, dispatched on the first character */
switch (*symbol)
{
case 'A':
case 'a':
{
if (LocaleCompare(symbol,"a") == 0)
return((MagickRealType) (QuantumScale*GetAlphaPixelComponent(&pixel)));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(symbol,"b") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'C':
case 'c':
{
if (LocaleNCompare(symbol,"channel",7) == 0)
{
/* channel(r,g,b,o) / channel(c,m,y,k,o): pick the geometry field
matching the channel being evaluated */
GeometryInfo
channel_info;
MagickStatusType
flags;
flags=ParseGeometry(symbol+7,&channel_info);
if (image->colorspace == CMYKColorspace)
switch (channel)
{
case CyanChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case MagentaChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case YellowChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case BlackChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
case OpacityChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
default:
return(0.0);
}
switch (channel)
{
case RedChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case GreenChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case BlueChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case OpacityChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
case IndexChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
default:
return(0.0);
}
return(0.0);
}
if (LocaleCompare(symbol,"c") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(symbol,"depth",5) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'G':
case 'g':
{
if (LocaleCompare(symbol,"g") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'K':
case 'k':
{
if (LocaleNCompare(symbol,"kurtosis",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"k") == 0)
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.index);
}
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(symbol,"h") == 0)
return((MagickRealType) image->rows);
if (LocaleCompare(symbol,"hue") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
return(hue);
}
break;
}
case 'I':
case 'i':
{
/* "image.<statistic>" delegates with the "image." prefix stripped */
if ((LocaleCompare(symbol,"image.depth") == 0) ||
(LocaleCompare(symbol,"image.minima") == 0) ||
(LocaleCompare(symbol,"image.maxima") == 0) ||
(LocaleCompare(symbol,"image.mean") == 0) ||
(LocaleCompare(symbol,"image.kurtosis") == 0) ||
(LocaleCompare(symbol,"image.skewness") == 0) ||
(LocaleCompare(symbol,"image.standard_deviation") == 0))
return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
if (LocaleCompare(symbol,"image.resolution.x") == 0)
return(image->x_resolution);
if (LocaleCompare(symbol,"image.resolution.y") == 0)
return(image->y_resolution);
if (LocaleCompare(symbol,"intensity") == 0)
return(QuantumScale*MagickPixelIntensityToQuantum(&pixel));
if (LocaleCompare(symbol,"i") == 0)
return((MagickRealType) x);
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(symbol,"j") == 0)
return((MagickRealType) y);
break;
}
case 'L':
case 'l':
{
if (LocaleCompare(symbol,"lightness") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
return(lightness);
}
if (LocaleCompare(symbol,"luminance") == 0)
{
double
luminence;
/* Rec.709 luma coefficients */
luminence=0.2126*pixel.red+0.7152*pixel.green+0.0722*pixel.blue;
return(QuantumScale*luminence);
}
break;
}
case 'M':
case 'm':
{
if (LocaleNCompare(symbol,"maxima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"mean",4) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"minima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"m") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'N':
case 'n':
{
if (LocaleCompare(symbol,"n") == 0)
return((MagickRealType) GetImageListLength(fx_info->images));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(symbol,"o") == 0)
return(QuantumScale*pixel.opacity);
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(symbol,"page.height") == 0)
return((MagickRealType) image->page.height);
if (LocaleCompare(symbol,"page.width") == 0)
return((MagickRealType) image->page.width);
if (LocaleCompare(symbol,"page.x") == 0)
return((MagickRealType) image->page.x);
if (LocaleCompare(symbol,"page.y") == 0)
return((MagickRealType) image->page.y);
break;
}
case 'R':
case 'r':
{
if (LocaleCompare(symbol,"resolution.x") == 0)
return(image->x_resolution);
if (LocaleCompare(symbol,"resolution.y") == 0)
return(image->y_resolution);
if (LocaleCompare(symbol,"r") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'S':
case 's':
{
if (LocaleCompare(symbol,"saturation") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
return(saturation);
}
if (LocaleNCompare(symbol,"skewness",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'T':
case 't':
{
if (LocaleCompare(symbol,"t") == 0)
return((MagickRealType) GetImageIndexInList(fx_info->images));
break;
}
case 'W':
case 'w':
{
if (LocaleCompare(symbol,"w") == 0)
return((MagickRealType) image->columns);
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(symbol,"y") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(symbol,"z") == 0)
{
MagickRealType
depth;
depth=(MagickRealType) GetImageChannelDepth(image,channel,
fx_info->exception);
return(depth);
}
break;
}
default:
break;
}
/* last resort: a user-defined variable assigned earlier via "name=expr" */
value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
if (value != (const char *) NULL)
return((MagickRealType) StringToDouble(value));
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",symbol);
return(0.0);
}
static const char *FxOperatorPrecedence(const char *expression,
ExceptionInfo *exception)
{
/*
Locate the operator at which the expression should be split for
recursive evaluation: scan left-to-right at nesting level 0, tracking
the loosest-binding operator seen.  Left-associative operators prefer
the rightmost occurrence (>=); right-associative ones (complement,
ternary, assignment) prefer the leftmost (>).  Returns NULL when no
operator is found.
*/
typedef enum
{
UndefinedPrecedence,
NullPrecedence,
BitwiseComplementPrecedence,
ExponentPrecedence,
ExponentialNotationPrecedence,
MultiplyPrecedence,
AdditionPrecedence,
ShiftPrecedence,
RelationalPrecedence,
EquivalencyPrecedence,
BitwiseAndPrecedence,
BitwiseOrPrecedence,
LogicalAndPrecedence,
LogicalOrPrecedence,
TernaryPrecedence,
AssignmentPrecedence,
CommaPrecedence,
SeparatorPrecedence
} FxPrecedence;
FxPrecedence
precedence,
target;
register const char
*subexpression;
register int
c;
size_t
level;
c=0;  /* previously-scanned character; 0 before the first iteration */
level=0;
subexpression=(const char *) NULL;
target=NullPrecedence;
while (*expression != '\0')
{
precedence=UndefinedPrecedence;
if ((isspace((int) ((char) *expression)) != 0) || (c == (int) '@'))
{
expression++;
continue;
}
/* skip over function names and literals whose characters would otherwise
be misread as operators or operands (the '2' in atan2, j0/j1, #hex) */
switch (*expression)
{
case 'A':
case 'a':
{
if (LocaleNCompare(expression,"atan2",5) == 0)
{
expression+=5;
break;
}
break;
}
case 'J':
case 'j':
{
if ((LocaleNCompare(expression,"j0",2) == 0) ||
(LocaleNCompare(expression,"j1",2) == 0))
{
expression+=2;
break;
}
break;
}
case '#':
{
while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
expression++;
break;
}
default:
break;
}
/* track {...} and [...] nesting via the previous character */
if ((c == (int) '{') || (c == (int) '['))
level++;
else
if ((c == (int) '}') || (c == (int) ']'))
level--;
if (level == 0)
switch ((unsigned char) *expression)
{
case '~':
case '!':
{
precedence=BitwiseComplementPrecedence;
break;
}
case '^':
case '@':
{
precedence=ExponentPrecedence;
break;
}
default:
{
/* detect implied multiplication: a number or ')' followed by an
identifier, '(' or a digit (but not the coordinates x/y) */
if (((c != 0) && ((isdigit((int) ((char) c)) != 0) ||
(strchr(")",c) != (char *) NULL))) &&
(((islower((int) ((char) *expression)) != 0) ||
(strchr("(",(int) *expression) != (char *) NULL)) ||
((isdigit((int) ((char) c)) == 0) &&
(isdigit((int) ((char) *expression)) != 0))) &&
(strchr("xy",(int) *expression) == (char *) NULL))
precedence=MultiplyPrecedence;
break;
}
case '*':
case '/':
case '%':
{
precedence=MultiplyPrecedence;
break;
}
case '+':
case '-':
{
/* only binary +/-: a sign after another operator is part of the
operand, not an addition */
if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
(isalpha(c) != 0))
precedence=AdditionPrecedence;
break;
}
case LeftShiftOperator:
case RightShiftOperator:
{
precedence=ShiftPrecedence;
break;
}
case '<':
case LessThanEqualOperator:
case GreaterThanEqualOperator:
case '>':
{
precedence=RelationalPrecedence;
break;
}
case EqualOperator:
case NotEqualOperator:
{
precedence=EquivalencyPrecedence;
break;
}
case '&':
{
precedence=BitwiseAndPrecedence;
break;
}
case '|':
{
precedence=BitwiseOrPrecedence;
break;
}
case LogicalAndOperator:
{
precedence=LogicalAndPrecedence;
break;
}
case LogicalOrOperator:
{
precedence=LogicalOrPrecedence;
break;
}
case ExponentialNotation:
{
precedence=ExponentialNotationPrecedence;
break;
}
case ':':
case '?':
{
precedence=TernaryPrecedence;
break;
}
case '=':
{
precedence=AssignmentPrecedence;
break;
}
case ',':
{
precedence=CommaPrecedence;
break;
}
case ';':
{
precedence=SeparatorPrecedence;
break;
}
}
if ((precedence == BitwiseComplementPrecedence) ||
(precedence == TernaryPrecedence) ||
(precedence == AssignmentPrecedence))
{
if (precedence > target)
{
/*
Right-to-left associativity.
*/
target=precedence;
subexpression=expression;
}
}
else
if (precedence >= target)
{
/*
Left-to-right associativity.
*/
target=precedence;
subexpression=expression;
}
/* skip whole parenthesized groups; operators inside bind tighter */
if (strchr("(",(int) *expression) != (char *) NULL)
expression=FxSubexpression(expression,exception);
c=(int) (*expression++);
}
return(subexpression);
}
static MagickRealType FxEvaluateSubexpression(FxInfo *fx_info,
const ChannelType channel,const ssize_t x,const ssize_t y,
const char *expression,MagickRealType *beta,ExceptionInfo *exception)
{
char
*q,
subexpression[MaxTextExtent];
MagickRealType
alpha,
gamma;
register const char
*p;
*beta=0.0;
if (exception->severity != UndefinedException)
return(0.0);
while (isspace((int) *expression) != 0)
expression++;
if (*expression == '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"MissingExpression","`%s'",expression);
return(0.0);
}
*subexpression='\0';
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,beta,
exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(MagickRealType) (~(size_t) *beta);
return(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow((double) alpha,(double) FxEvaluateSubexpression(fx_info,
channel,x,y,++p,beta,exception));
return(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
if (*beta == 0.0)
{
if (exception->severity == UndefinedException)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
return(0.0);
}
return(alpha/(*beta));
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=fabs(floor(((double) *beta)+0.5));
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
return(0.0);
}
return(fmod((double) alpha,(double) *beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha-(*beta));
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(MagickRealType) ((size_t) (alpha+0.5) << (size_t)
(gamma+0.5));
return(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(MagickRealType) ((size_t) (alpha+0.5) >> (size_t)
(gamma+0.5));
return(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(fabs(alpha-(*beta)) <= MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(fabs(alpha-(*beta)) > MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(MagickRealType) ((size_t) (alpha+0.5) & (size_t)
(gamma+0.5));
return(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(MagickRealType) ((size_t) (alpha+0.5) | (size_t)
(gamma+0.5));
return(*beta);
}
case LogicalAndOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(alpha > 0.0) && (gamma > 0.0) ? 1.0 : 0.0;
return(*beta);
}
case LogicalOrOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(alpha > 0.0) || (gamma > 0.0) ? 1.0 : 0.0;
return(*beta);
}
case '?':
{
MagickRealType
gamma;
(void) CopyMagickString(subexpression,++p,MaxTextExtent);
q=subexpression;
p=StringToken(":",&q);
if (q == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
return(0.0);
}
if (fabs((double) alpha) > MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,beta,exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,beta,exception);
return(gamma);
}
case '=':
{
char
numeric[MaxTextExtent];
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
return(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
(void) FormatMagickString(numeric,MaxTextExtent,"%g",(double)
*beta);
(void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression);
(void) AddValueToSplayTree(fx_info->symbols,ConstantString(
subexpression),ConstantString(numeric));
return(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,beta,
exception);
return(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
(void) CopyMagickString(subexpression,expression+1,MaxTextExtent);
subexpression[strlen(subexpression)-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,beta,
exception);
return(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta,
exception);
return(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta,
exception);
return(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta,
exception);
return((MagickRealType) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (LocaleNCompare(expression,"abs",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) fabs((double) alpha));
}
if (LocaleNCompare(expression,"acos",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) acos((double) alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"airy",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
if (alpha == 0.0)
return(1.0);
gamma=2.0*j1((double) (MagickPI*alpha))/(MagickPI*alpha);
return(gamma*gamma);
}
#endif
if (LocaleNCompare(expression,"asin",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) asin((double) alpha));
}
if (LocaleNCompare(expression,"alt",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"atan2",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
return((MagickRealType) atan2((double) alpha,(double) *beta));
}
if (LocaleNCompare(expression,"atan",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) atan((double) alpha));
}
if (LocaleCompare(expression,"a") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'C':
case 'c':
{
if (LocaleNCompare(expression,"ceil",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) ceil((double) alpha));
}
if (LocaleNCompare(expression,"cosh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) cosh((double) alpha));
}
if (LocaleNCompare(expression,"cos",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) cos((double) alpha));
}
if (LocaleCompare(expression,"c") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(expression,"debug",5) == 0)
{
const char
*type;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
if (fx_info->images->colorspace == CMYKColorspace)
switch (channel)
{
case CyanChannel: type="cyan"; break;
case MagentaChannel: type="magenta"; break;
case YellowChannel: type="yellow"; break;
case OpacityChannel: type="opacity"; break;
case BlackChannel: type="black"; break;
default: type="unknown"; break;
}
else
switch (channel)
{
case RedChannel: type="red"; break;
case GreenChannel: type="green"; break;
case BlueChannel: type="blue"; break;
case OpacityChannel: type="opacity"; break;
default: type="unknown"; break;
}
(void) CopyMagickString(subexpression,expression+6,MaxTextExtent);
if (strlen(subexpression) > 1)
subexpression[strlen(subexpression)-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) fprintf(fx_info->file,"%s[%.20g,%.20g].%s: %s=%.*g\n",
fx_info->images->filename,(double) x,(double) y,type,
subexpression,GetMagickPrecision(),(double) alpha);
return(0.0);
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
return((MagickRealType) MagickEpsilon);
if (LocaleNCompare(expression,"exp",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) exp((double) alpha));
}
if (LocaleCompare(expression,"e") == 0)
return((MagickRealType) 2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (LocaleNCompare(expression,"floor",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
return((MagickRealType) floor((double) alpha));
}
break;
}
case 'G':
case 'g':
{
if (LocaleCompare(expression,"g") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleCompare(expression,"hue") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"hypot",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
return((MagickRealType) hypot((double) alpha,(double) *beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(expression,"intensity") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"int",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) floor(alpha));
}
if (LocaleCompare(expression,"i") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (LocaleNCompare(expression,"j0",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta,
exception);
return((MagickRealType) j0((double) alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"j1",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta,
exception);
return((MagickRealType) j1((double) alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"jinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
if (alpha == 0.0)
return(1.0);
gamma=(MagickRealType) (2.0*j1((double) (MagickPI*alpha))/
(MagickPI*alpha));
return(gamma);
}
#endif
break;
}
case 'L':
case 'l':
{
if (LocaleNCompare(expression,"ln",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta,
exception);
return((MagickRealType) log((double) alpha));
}
if (LocaleNCompare(expression,"logtwo",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,beta,
exception);
return((MagickRealType) log10((double) alpha))/log10(2.0);
}
if (LocaleNCompare(expression,"log",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) log10((double) alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
return((MagickRealType) QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (LocaleNCompare(expression,"max",3) == 0)
return(FxMax(fx_info,channel,x,y,expression+3,exception));
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (LocaleNCompare(expression,"min",3) == 0)
return(FxMin(fx_info,channel,x,y,expression+3,exception));
if (LocaleNCompare(expression,"mod",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) fmod((double) alpha,(double) *beta));
}
if (LocaleCompare(expression,"m") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'N':
case 'n':
{
if (LocaleCompare(expression,"n") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
return(1.0);
if (LocaleCompare(expression,"o") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"pi") == 0)
return((MagickRealType) MagickPI);
if (LocaleNCompare(expression,"pow",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) pow((double) alpha,(double) *beta));
}
if (LocaleCompare(expression,"p") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
return((MagickRealType) QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
return((MagickRealType) QuantumScale);
break;
}
case 'R':
case 'r':
{
if (LocaleNCompare(expression,"rand",4) == 0)
return((MagickRealType) GetPseudoRandomValue(fx_info->random_info));
if (LocaleNCompare(expression,"round",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
return((MagickRealType) floor((double) alpha+0.5));
}
if (LocaleCompare(expression,"r") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"sign",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return(alpha < 0.0 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"sinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
if (alpha == 0)
return(1.0);
gamma=(MagickRealType) (sin((double) (MagickPI*alpha))/
(MagickPI*alpha));
return(gamma);
}
if (LocaleNCompare(expression,"sinh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) sinh((double) alpha));
}
if (LocaleNCompare(expression,"sin",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) sin((double) alpha));
}
if (LocaleNCompare(expression,"sqrt",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) sqrt((double) alpha));
}
if (LocaleCompare(expression,"s") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'T':
case 't':
{
if (LocaleNCompare(expression,"tanh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) tanh((double) alpha));
}
if (LocaleNCompare(expression,"tan",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) tan((double) alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
return(0.0);
if (LocaleNCompare(expression,"trunc",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
if (alpha >= 0.0)
return((MagickRealType) floor((double) alpha));
return((MagickRealType) ceil((double) alpha));
}
if (LocaleCompare(expression,"t") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'W':
case 'w':
{
if (LocaleCompare(expression,"w") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
default:
break;
}
q=(char *) expression;
alpha=strtod(expression,&q);
if (q == expression)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
return(alpha);
}
/*
  FxEvaluateExpression() evaluates the fx expression for the gray channel at
  pixel location (0,0) and stores the result in *alpha.  Returns MagickFalse
  if evaluation raised an option error into exception.
*/
MagickExport MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  MagickRealType *alpha,ExceptionInfo *exception)
{
  return(FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception));
}
/*
  FxPreprocessExpression() evaluates the fx expression once with debug output
  suppressed (fx_info->file temporarily cleared) so that any parse problems
  surface without emitting trace text; the result lands in *alpha.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  MagickRealType *alpha,ExceptionInfo *exception)
{
  FILE
    *saved_file;

  MagickBooleanType
    status;

  /*
    Silence the debug stream for the duration of this evaluation, then
    restore it so normal per-pixel tracing still works afterwards.
  */
  saved_file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
  fx_info->file=saved_file;
  return(status);
}
/*
  FxEvaluateChannelExpression() evaluates the complete fx expression for one
  channel at pixel (x,y).  The primary result is stored in *alpha; beta is a
  scratch slot used by binary operators inside the evaluator and is
  discarded here.  Returns MagickFalse when evaluation reported an
  OptionError into exception, MagickTrue otherwise.
*/
MagickExport MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const ChannelType channel,const ssize_t x,const ssize_t y,
  MagickRealType *alpha,ExceptionInfo *exception)
{
  MagickRealType
    beta;

  beta=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,&beta,
    exception);
  if (exception->severity == OptionError)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
% Image *FxImageChannel(const Image *image,const ChannelType channel,
% const char *expression,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DestroyFxThreadSet() releases each per-thread FxInfo in the set, then the
  array itself.  Always returns NULL so callers can write
  fx_info=DestroyFxThreadSet(fx_info).
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    n;

  assert(fx_info != (FxInfo **) NULL);
  for (n=0; n < (ssize_t) GetOpenMPMaximumThreads(); n++)
  {
    if (fx_info[n] == (FxInfo *) NULL)
      continue;  /* slot never acquired (partial-allocation cleanup) */
    fx_info[n]=DestroyFxInfo(fx_info[n]);
  }
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
  AcquireFxThreadSet() allocates one FxInfo per OpenMP thread so the fx
  expression can be evaluated concurrently, one evaluator per row worker.
  An expression beginning with '@' names a file to read the expression from.
  Each evaluator is preprocessed once to prime its symbol table.  Returns
  NULL on allocation or file-read failure.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  FxInfo
    **fx_info;

  MagickRealType
    alpha;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    return((FxInfo **) NULL);
  (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0,exception);
  if (fx_expression == (char *) NULL)
    {
      /*
        Fix: FileToString() returns NULL for an unreadable @filename; the
        original passed that NULL straight into AcquireFxInfo().
      */
      fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    fx_info[i]=AcquireFxInfo(image,fx_expression);
    if (fx_info[i] == (FxInfo *) NULL)
      {
        /*
          Fix: the expression string was leaked on this failure path.
        */
        fx_expression=DestroyString(fx_expression);
        return(DestroyFxThreadSet(fx_info));
      }
    (void) FxPreprocessExpression(fx_info[i],&alpha,fx_info[i]->exception);
  }
  fx_expression=DestroyString(fx_expression);
  return(fx_info);
}
/*
  FxImage() applies the mathematical expression to the image; convenience
  wrapper around FxImageChannel() for the default (gray) channel.
*/
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  return(FxImageChannel(image,GrayChannel,expression,exception));
}
/*
  FxImageChannel() evaluates the fx expression at every pixel of a clone of
  image, writing the (quantum-scaled) result into each channel selected by
  the channel mask.  Returns the new image, or NULL on failure.
*/
MagickExport Image *FxImageChannel(const Image *image,const ChannelType channel,
  const char *expression,ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"

  CacheView
    *fx_view;

  FxInfo
    **restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    alpha;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Clone the source; DirectClass is required because pixels are rewritten.
  */
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(fx_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&fx_image->exception);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    One FxInfo per OpenMP thread so rows can be evaluated concurrently.
  */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    {
      fx_image=DestroyImage(fx_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Dry-run the expression once so syntax errors are reported into the
    caller's exception before the parallel loop starts.
  */
  status=FxPreprocessExpression(fx_info[0],&alpha,exception);
  if (status == MagickFalse)
    {
      fx_image=DestroyImage(fx_image);
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  /*
    Fx image.
  */
  status=MagickTrue;
  progress=0;
  fx_view=AcquireCacheView(fx_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's private FxInfo */

    MagickRealType
      alpha;

    register IndexPacket
      *restrict fx_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    fx_indexes=GetCacheViewAuthenticIndexQueue(fx_view);
    alpha=0.0;
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      /*
        Evaluate the expression once per requested channel; results are
        scaled from the nominal [0,1] range to the quantum range.
      */
      if ((channel & RedChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],RedChannel,x,y,
            &alpha,exception);
          q->red=ClampToQuantum((MagickRealType) QuantumRange*alpha);
        }
      if ((channel & GreenChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],GreenChannel,x,y,
            &alpha,exception);
          q->green=ClampToQuantum((MagickRealType) QuantumRange*alpha);
        }
      if ((channel & BlueChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],BlueChannel,x,y,
            &alpha,exception);
          q->blue=ClampToQuantum((MagickRealType) QuantumRange*alpha);
        }
      if ((channel & OpacityChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],OpacityChannel,x,y,
            &alpha,exception);
          /* matte images store opacity (inverted alpha) */
          if (image->matte == MagickFalse)
            q->opacity=ClampToQuantum((MagickRealType) QuantumRange*alpha);
          else
            q->opacity=ClampToQuantum((MagickRealType) (QuantumRange-
              QuantumRange*alpha));
        }
      if (((channel & IndexChannel) != 0) &&
          (fx_image->colorspace == CMYKColorspace))
        {
          (void) FxEvaluateChannelExpression(fx_info[id],IndexChannel,x,y,
            &alpha,exception);
          fx_indexes[x]=(IndexPacket) ClampToQuantum((MagickRealType)
            QuantumRange*alpha);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* serialize the progress callback across threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FxImageChannel)
#endif
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "imploded" by the specified percentage.  It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is imploded.  A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ImplodeImage() returns a copy of image with pixels inside the largest
  centered ellipse remapped toward (amount>0) or away from (amount<0) the
  image center; returns NULL on failure.
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *image_view,
    *implode_view;

  Image
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  implode_image=CloneImage(image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(implode_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&implode_image->exception);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /* a translucent background requires a matte channel on the result */
  if (implode_image->background_color.opacity != OpaqueOpacity)
    implode_image->matte=MagickTrue;
  /*
    Compute scaling factor.  The shorter axis is stretched so the implosion
    region is the largest ellipse inscribed in the image; radius is half the
    shorter dimension.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*image->columns;
  center.y=0.5*image->rows;
  radius=center.x;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      {
        scale.x=(double) image->rows/(double) image->columns;
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(implode_image,&zero);
  image_view=AcquireCacheView(image);
  implode_view=AcquireCacheView(implode_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    MagickRealType
      distance;

    PointInfo
      delta;

    register IndexPacket
      *restrict implode_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;  /* another row already failed */
    q=GetCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    implode_indexes=GetCacheViewAuthenticIndexQueue(implode_view);
    delta.y=scale.y*(double) (y-center.y);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance < (radius*radius))
        {
          double
            factor;

          /*
            Implode the pixel: factor shrinks (or grows) the offset from
            center, then the source is sampled at the remapped location.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin((double) (MagickPI*sqrt((double) distance)/
              radius/2)),-amount);
          (void) InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) (factor*delta.x/scale.x+
            center.x),(double) (factor*delta.y/scale.y+center.y),&pixel,
            exception);
          SetPixelPacket(implode_image,&pixel,q,implode_indexes+x);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* serialize the progress callback across threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(image,ImplodeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImages method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MorphImages() transforms each frame of the sequence into the next through
  number_frames intervening images, blending resized neighbors with linearly
  ramped weights.  A single-image list is simply replicated.  Returns the
  head of the new list, or NULL on failure.
*/
MagickExport Image *MorphImages(const Image *image,
  const size_t number_frames,ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  MagickRealType
    alpha,
    beta;

  register const Image
    *next;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  /*
    Fix: initialize status before the single-image path, which previously
    assigned MagickFalse to an uninitialized variable.
  */
  status=MagickTrue;
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: just replicate it number_frames-1 more times.
      */
      for (i=1; i < (ssize_t) number_frames; i++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) i,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence.
  */
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (i=0; i < (ssize_t) number_frames; i++)
    {
      CacheView
        *image_view,
        *morph_view;

      /*
        beta ramps from near 0 to near 1 across the in-between frames;
        alpha is its complement.
      */
      beta=(MagickRealType) (i+1.0)/(MagickRealType) (number_frames+1.0);
      alpha=1.0-beta;
      /* resize the current frame to the blended intermediate geometry */
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*
        next->rows+beta*GetNextImageInList(next)->rows+0.5),
        next->filter,next->blur,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      if (SetImageStorageClass(morph_image,DirectClass) == MagickFalse)
        {
          InheritException(exception,&morph_image->exception);
          morph_image=DestroyImage(morph_image);
          /*
            Fix: the accumulated morph_images list was leaked on this
            failure path.
          */
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      /* resize the next frame to the same geometry for blending */
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,
        GetNextImageInList(next)->blur,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireCacheView(morph_image);
      morph_view=AcquireCacheView(morph_images);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        register const PixelPacket
          *restrict p;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          /* blend the resized next frame into the resized current frame */
          q->red=ClampToQuantum(alpha*q->red+beta*GetRedPixelComponent(p));
          q->green=ClampToQuantum(alpha*q->green+beta*
            GetGreenPixelComponent(p));
          q->blue=ClampToQuantum(alpha*q->blue+beta*GetBluePixelComponent(p));
          q->opacity=ClampToQuantum(alpha*q->opacity+beta*
            GetOpacityPixelComponent(p));
          p++;
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (i < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_MorphImages)
#endif
        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
*/
/*
  PlasmaPixel() perturbs pixel by a uniform random offset drawn from
  [-noise/2,+noise/2) and clamps the result to the quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
  const MagickRealType pixel,const MagickRealType noise)
{
  MagickRealType
    value;

  value=pixel+noise*GetPseudoRandomValue(random_info)-noise/2.0;
  return(ClampToQuantum(value));
}
/*
  PlasmaImageProxy() recursively subdivides segment into quadrants while
  depth remains, then paints the segment's edge midpoints and center with
  randomly perturbed averages of the corner pixels (noise amplitude decays
  with attenuate).  Returns MagickTrue when the segment needs no further
  subdivision, MagickFalse otherwise.
*/
MagickExport MagickBooleanType PlasmaImageProxy(Image *image,
  CacheView *image_view,RandomInfo *random_info,const SegmentInfo *segment,
  size_t attenuate,size_t depth)
{
  ExceptionInfo
    *exception;

  MagickRealType
    plasma;

  PixelPacket
    u,
    v;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  if (((segment->x2-segment->x1) == 0.0) && ((segment->y2-segment->y1) == 0.0))
    return(MagickTrue);
  if (depth != 0)
    {
      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,random_info,&local_info,
        attenuate,depth);
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      (void) PlasmaImageProxy(image,image_view,random_info,&local_info,
        attenuate,depth);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,random_info,&local_info,
        attenuate,depth);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      return(PlasmaImageProxy(image,image_view,random_info,&local_info,
        attenuate,depth));
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  if ((segment->x1 == (double) x_mid) && (segment->x2 == (double) x_mid) &&
      (segment->y1 == (double) y_mid) && (segment->y2 == (double) y_mid))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.
  */
  exception=(&image->exception);
  plasma=(MagickRealType) QuantumRange/(2.0*attenuate);
  if ((segment->x1 != (double) x_mid) || (segment->x2 != (double) x_mid))
    {
      register PixelPacket
        *restrict q;

      /*
        Left pixel.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t)
        ceil(segment->y1-0.5),&u,exception);
      (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t)
        ceil(segment->y2-0.5),&v,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickTrue);
      q->red=PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,
        plasma);
      q->green=PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/2.0,
        plasma);
      q->blue=PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0,
        plasma);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      if (segment->x1 != segment->x2)
        {
          /*
            Right pixel.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t)
            ceil(segment->y1-0.5),&u,exception);
          (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t)
            ceil(segment->y2-0.5),&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          q->red=PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,
            plasma);
          q->green=PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/
            2.0,plasma);
          q->blue=PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0,
            plasma);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((segment->y1 != (double) y_mid) || (segment->y2 != (double) y_mid))
    {
      if ((segment->x1 != (double) x_mid) || (segment->y2 != (double) y_mid))
        {
          register PixelPacket
            *restrict q;

          /*
            Bottom pixel.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
            ceil(segment->x1-0.5),y,&u,exception);
          (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
            ceil(segment->x2-0.5),y,&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          q->red=PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,
            plasma);
          q->green=PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/
            2.0,plasma);
          q->blue=PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0,
            plasma);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (segment->y1 != segment->y2)
        {
          register PixelPacket
            *restrict q;

          /*
            Top pixel.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
            ceil(segment->x1-0.5),y,&u,exception);
          (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
            ceil(segment->x2-0.5),y,&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          q->red=PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,
            plasma);
          q->green=PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/
            2.0,plasma);
          q->blue=PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0,
            plasma);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((segment->x1 != segment->x2) || (segment->y1 != segment->y2))
    {
      register PixelPacket
        *restrict q;

      /*
        Middle pixel.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      /*
        Consistency fix: read through image_view like every other pixel
        access in this function instead of GetOneVirtualPixel() on the raw
        image, which bypassed the caller's cache view.
      */
      (void) GetOneCacheViewVirtualPixel(image_view,x,y,&u,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      (void) GetOneCacheViewVirtualPixel(image_view,x,y,&v,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickTrue);
      q->red=PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,
        plasma);
      q->green=PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/2.0,
        plasma);
      q->blue=PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0,
        plasma);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
    }
  if (((segment->x2-segment->x1) < 3.0) && ((segment->y2-segment->y1) < 3.0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  PlasmaImage() initializes image with plasma fractal values over the given
  segment.  The image must carry a base color and the random generator is
  seeded internally via AcquireRandomInfo().  Returns the status of the
  underlying PlasmaImageProxy() recursion.
*/
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  /*
    Fix: validate image before touching its members; the original read
    image->debug ahead of the NULL assert and logged the trace event twice.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image_view=AcquireCacheView(image);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,random_info,segment,attenuate,depth);
  random_info=DestroyRandomInfo(random_info);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
const double angle,ExceptionInfo *exception)
{
const char
*value;
Image
*bend_image,
*caption_image,
*flop_image,
*picture_image,
*polaroid_image,
*rotate_image,
*trim_image;
size_t
height;
ssize_t
quantum;
/*
Simulate a Polaroid picture.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* Border width: 1/25th of the longest dimension, but at least 10 pixels. */
quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
image->rows)/25.0,10.0);
height=image->rows+2*quantum;
caption_image=(Image *) NULL;
/* If the image carries a "Caption" property, render it below the picture. */
value=GetImageProperty(image,"Caption");
if (value != (const char *) NULL)
{
char
*caption,
geometry[MaxTextExtent];
DrawInfo
*annotate_info;
MagickBooleanType
status;
ssize_t
count;
TypeMetric
metrics;
/*
Generate caption image.
*/
caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
if (caption_image == (Image *) NULL)
return((Image *) NULL);
annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
caption=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,
value);
(void) CloneString(&annotate_info->text,caption);
/* Word-wrap the caption; count is the number of wrapped lines. */
count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics,
&caption);
/* Grow the 1-row clone to fit (count+1) text lines of the measured font. */
status=SetImageExtent(caption_image,image->columns,(size_t)
((count+1)*(metrics.ascent-metrics.descent)+0.5));
if (status == MagickFalse)
caption_image=DestroyImage(caption_image);
else
{
caption_image->background_color=image->border_color;
(void) SetImageBackgroundColor(caption_image);
(void) CloneString(&annotate_info->text,caption);
/* Anchor the first baseline one ascent below the top edge. */
(void) FormatMagickString(geometry,MaxTextExtent,"+0+%g",
metrics.ascent);
if (annotate_info->gravity == UndefinedGravity)
(void) CloneString(&annotate_info->geometry,AcquireString(
geometry));
(void) AnnotateImage(caption_image,annotate_info);
height+=caption_image->rows;
}
annotate_info=DestroyDrawInfo(annotate_info);
caption=DestroyString(caption);
}
/* Matte: the picture plus a quantum-wide border (and caption space). */
picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
exception);
if (picture_image == (Image *) NULL)
{
if (caption_image != (Image *) NULL)
caption_image=DestroyImage(caption_image);
return((Image *) NULL);
}
picture_image->background_color=image->border_color;
(void) SetImageBackgroundColor(picture_image);
(void) CompositeImage(picture_image,OverCompositeOp,image,quantum,quantum);
if (caption_image != (Image *) NULL)
{
(void) CompositeImage(picture_image,OverCompositeOp,caption_image,
quantum,(ssize_t) (image->rows+3*quantum/2));
caption_image=DestroyImage(caption_image);
}
(void) QueryColorDatabase("none",&picture_image->background_color,exception);
(void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel);
/*
Bend the picture: WaveImage() only waves horizontally, so rotate 90
degrees, wave, then rotate back -90 degrees.
*/
rotate_image=RotateImage(picture_image,90.0,exception);
picture_image=DestroyImage(picture_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
picture_image=rotate_image;
bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
picture_image->columns,exception);
picture_image=DestroyImage(picture_image);
if (bend_image == (Image *) NULL)
return((Image *) NULL);
InheritException(&bend_image->exception,exception);
picture_image=bend_image;
rotate_image=RotateImage(picture_image,-90.0,exception);
picture_image=DestroyImage(picture_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
picture_image=rotate_image;
picture_image->background_color=image->background_color;
/* Drop shadow at 80% opacity, blurred by sigma=2, offset quantum/3. */
polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
exception);
if (polaroid_image == (Image *) NULL)
{
/* DestroyImage() returns (Image *) NULL, so this returns NULL. */
picture_image=DestroyImage(picture_image);
return(picture_image);
}
/* Flop so the shadow falls on the opposite side before compositing. */
flop_image=FlopImage(polaroid_image,exception);
polaroid_image=DestroyImage(polaroid_image);
if (flop_image == (Image *) NULL)
{
/* DestroyImage() returns (Image *) NULL, so this returns NULL. */
picture_image=DestroyImage(picture_image);
return(picture_image);
}
polaroid_image=flop_image;
(void) CompositeImage(polaroid_image,OverCompositeOp,picture_image,
(ssize_t) (-0.01*picture_image->columns/2.0),0L);
picture_image=DestroyImage(picture_image);
(void) QueryColorDatabase("none",&polaroid_image->background_color,exception);
/* Finally tilt by the requested angle and trim the transparent margin. */
rotate_image=RotateImage(polaroid_image,angle,exception);
polaroid_image=DestroyImage(polaroid_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
polaroid_image=rotate_image;
trim_image=TrimImage(polaroid_image,exception);
polaroid_image=DestroyImage(polaroid_image);
if (trim_image == (Image *) NULL)
return((Image *) NULL);
polaroid_image=trim_image;
return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"
CacheView
*image_view,
*sepia_view;
Image
*sepia_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize sepia-toned image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
sepia_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (sepia_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sepia_image,DirectClass) == MagickFalse)
{
InheritException(exception,&sepia_image->exception);
sepia_image=DestroyImage(sepia_image);
return((Image *) NULL);
}
/*
Tone each row of the image. Rows are processed independently in
parallel; status and progress are the only shared state.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
sepia_view=AcquireCacheView(sepia_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
intensity,
tone;
intensity=(MagickRealType) PixelIntensityToQuantum(p);
/* Red: shift intensity up by (QuantumRange-threshold), clamped. */
tone=intensity > threshold ? (MagickRealType) QuantumRange : intensity+
(MagickRealType) QuantumRange-threshold;
q->red=ClampToQuantum(tone);
/* Green: same shift against a threshold scaled by 7/6. */
tone=intensity > (7.0*threshold/6.0) ? (MagickRealType) QuantumRange :
intensity+(MagickRealType) QuantumRange-7.0*threshold/6.0;
q->green=ClampToQuantum(tone);
/* Blue: darken by threshold/6, floored at zero. */
tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
q->blue=ClampToQuantum(tone);
/* Keep green/blue above threshold/7 to preserve the sepia cast. */
tone=threshold/7.0;
if ((MagickRealType) q->green < tone)
q->green=ClampToQuantum(tone);
if ((MagickRealType) q->blue < tone)
q->blue=ClampToQuantum(tone);
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SepiaToneImage)
#endif
proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sepia_view=DestroyCacheView(sepia_view);
image_view=DestroyCacheView(image_view);
/* Stretch and enhance contrast of the toned result. */
(void) NormalizeImage(sepia_image);
(void) ContrastImage(sepia_image,MagickTrue);
if (status == MagickFalse)
sepia_image=DestroyImage(sepia_image);
return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double opacity,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double opacity,
const double sigma,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"
CacheView
*image_view;
Image
*border_image,
*clone_image,
*shadow_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
border_info;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod);
clone_image->compose=OverCompositeOp;
/* Pad with a transparent border 2*sigma wide so the blur has room. */
border_info.width=(size_t) floor(2.0*sigma+0.5);
border_info.height=(size_t) floor(2.0*sigma+0.5);
border_info.x=0;
border_info.y=0;
(void) QueryColorDatabase("none",&clone_image->border_color,exception);
border_image=BorderImage(clone_image,&border_info,exception);
clone_image=DestroyImage(clone_image);
if (border_image == (Image *) NULL)
return((Image *) NULL);
if (border_image->matte == MagickFalse)
(void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel);
/*
Shadow image: flatten every pixel to the background color, scaling the
existing alpha by the requested opacity percentage.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(border_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) border_image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) border_image->columns; x++)
{
q->red=border_image->background_color.red;
q->green=border_image->background_color.green;
q->blue=border_image->background_color.blue;
if (border_image->matte == MagickFalse)
q->opacity=border_image->background_color.opacity;
else
/* Scale pixel alpha by opacity percent (0-100). */
q->opacity=ClampToQuantum((MagickRealType) (QuantumRange-
GetAlphaPixelComponent(q)*opacity/100.0));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ShadowImage)
#endif
proceed=SetImageProgress(image,ShadowImageTag,progress++,
border_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/* Soften the shadow by blurring only the alpha channel. */
shadow_image=BlurImageChannel(border_image,AlphaChannel,0.0,sigma,exception);
border_image=DestroyImage(border_image);
if (shadow_image == (Image *) NULL)
return((Image *) NULL);
if (shadow_image->page.width == 0)
shadow_image->page.width=shadow_image->columns;
if (shadow_image->page.height == 0)
shadow_image->page.height=shadow_image->rows;
/* Shift the page geometry by the requested offset minus the border pad. */
shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  RandomInfo
    **restrict random_info;

  ssize_t
    y;

  /*
    Validate parameters; the original omitted the asserts and debug trace
    that every sibling effect method in this module performs.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Sketch image: start from a double-sized canvas of uniform gray noise.
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  GetMagickPixelPacket(random_image,&zero);
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireCacheView(random_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(random_view);
    pixel=zero;
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      /*
        Gray noise: one random value replicated across all color channels
        (per-thread generator keeps the parallel loop deterministic-safe).
      */
      pixel.red=(MagickRealType) (QuantumRange*
        GetPseudoRandomValue(random_info[id]));
      pixel.green=pixel.red;
      pixel.blue=pixel.red;
      if (image->colorspace == CMYKColorspace)
        pixel.index=pixel.red;
      SetPixelPacket(random_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      /* DestroyImage() returns (Image *) NULL, so this returns NULL. */
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  /*
    Streak the noise along the requested angle, then extract edges to
    form the pencil strokes.
  */
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(dodge_image);
  (void) NegateImage(dodge_image,MagickFalse);
  /* Shrink the stroke canvas back to the original image size. */
  (void) TransformImage(&dodge_image,(char *) NULL,"50%");
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  /*
    Color-dodge the strokes onto a copy of the original, then blend the
    original back in at 20/80 (compose:args) to restore some color.
  */
  (void) CompositeImage(sketch_image,ColorDodgeCompositeOp,dodge_image,0,0);
  dodge_image=DestroyImage(dodge_image);
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,BlendCompositeOp,blend_image,0,0);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
const double threshold)
{
#define SolarizeImageTag "Solarize/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
/*
Solarize colormap: invert any channel value above the threshold.
NOTE(review): the raster loop below also inverts the pixel values —
presumably to keep PseudoClass pixel RGB copies in sync with the
colormap; verify against SyncImage semantics.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((MagickRealType) image->colormap[i].red > threshold)
image->colormap[i].red=(Quantum) QuantumRange-image->colormap[i].red;
if ((MagickRealType) image->colormap[i].green > threshold)
image->colormap[i].green=(Quantum) QuantumRange-
image->colormap[i].green;
if ((MagickRealType) image->colormap[i].blue > threshold)
image->colormap[i].blue=(Quantum) QuantumRange-
image->colormap[i].blue;
}
}
/*
Solarize image: invert raster channel values above the threshold.
Rows are processed in parallel; status/progress are shared.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((MagickRealType) q->red > threshold)
q->red=(Quantum) QuantumRange-q->red;
if ((MagickRealType) q->green > threshold)
q->green=(Quantum) QuantumRange-q->green;
if ((MagickRealType) q->blue > threshold)
q->blue=(Quantum) QuantumRange-q->blue;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SolarizeImage)
#endif
proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image. Recover
% the hidden watermark later to prove the authenticity of the image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,const Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (alpha)=(Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"

  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelPacket
    pixel;

  register PixelPacket
    *q;

  register ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stegano_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stegano_image->exception);
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  /*
    Hide watermark in low-order bits of image.  Watermark intensity bits
    (most-significant first, index i) are written into target bit plane j,
    cycling the destination channel (c: red, green, blue) and advancing the
    destination pixel offset k for each bit.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=image->offset;
  status=MagickTrue;
  watermark_view=AcquireCacheView(watermark);
  stegano_view=AcquireCacheView(stegano_image);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        (void) GetOneCacheViewVirtualPixel(watermark_view,x,y,&pixel,exception);
        if ((k/(ssize_t) stegano_image->columns) >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (PixelPacket *) NULL)
          break;
        switch (c)
        {
          case 0:
          {
            SetBit(q->red,j,GetBit(PixelIntensityToQuantum(&pixel),i));
            break;
          }
          case 1:
          {
            SetBit(q->green,j,GetBit(PixelIntensityToQuantum(&pixel),i));
            break;
          }
          case 2:
          {
            SetBit(q->blue,j,GetBit(PixelIntensityToQuantum(&pixel),i));
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          Wrap at the total pixel count (columns*rows); the original
          tested columns*columns, which is wrong for non-square images.
        */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        /* A full lap back to the starting offset fills one bit plane. */
        if (k == image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (stegano_image->storage_class == PseudoClass)
    (void) SyncImage(stegano_image);
  if (status == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
const Image *right_image,ExceptionInfo *exception)
{
/* Convenience wrapper: anaglyph with zero offset between the two images. */
return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Compose a red/cyan anaglyph: the red channel comes from the (offset)
    left image; green and blue come from the right image.

    Fixes: removed a duplicated assert on right_image, and pixel-cache
    failures now set status so a truncated result is no longer returned
    as success (previously the status==MagickFalse cleanup path was
    unreachable except via progress-monitor cancellation).
  */
  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stereo_image->exception);
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  /*
    Copy left image to red channel and right image to blue channel.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    register PixelPacket
      *restrict r;

    /* Negative offsets shift the left image right/down within the frame. */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) ||
        (r == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      r->red=GetRedPixelComponent(p);
      r->green=q->green;
      r->blue=q->blue;
      /* Opacity is the average of the two source opacities. */
      r->opacity=(Quantum) ((p->opacity+q->opacity)/2);
      p++;
      q++;
      r++;
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"
CacheView
*image_view,
*swirl_view;
Image
*swirl_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
MagickRealType
radius;
PointInfo
center,
scale;
ssize_t
y;
/*
Initialize swirl image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
swirl_image=CloneImage(image,0,0,MagickTrue,exception);
if (swirl_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(swirl_image,DirectClass) == MagickFalse)
{
InheritException(exception,&swirl_image->exception);
swirl_image=DestroyImage(swirl_image);
return((Image *) NULL);
}
if (swirl_image->background_color.opacity != OpaqueOpacity)
swirl_image->matte=MagickTrue;
/*
Compute scaling factor: normalize the non-square image so the swirl
region is an ellipse inscribed in the frame.
*/
center.x=(double) image->columns/2.0;
center.y=(double) image->rows/2.0;
radius=MagickMax(center.x,center.y);
scale.x=1.0;
scale.y=1.0;
if (image->columns > image->rows)
scale.y=(double) image->columns/(double) image->rows;
else
if (image->columns < image->rows)
scale.x=(double) image->rows/(double) image->columns;
degrees=(double) DegreesToRadians(degrees);
/*
Swirl image. Rows are processed in parallel; progress/status are the
only shared state.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(swirl_image,&zero);
image_view=AcquireCacheView(image);
swirl_view=AcquireCacheView(swirl_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
MagickRealType
distance;
PointInfo
delta;
register IndexPacket
*restrict swirl_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
swirl_indexes=GetCacheViewAuthenticIndexQueue(swirl_view);
delta.y=scale.y*(double) (y-center.y);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
/*
Determine if the pixel is within an ellipse.
*/
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance < (radius*radius))
{
MagickRealType
cosine,
factor,
sine;
/*
Swirl the pixel: rotation angle falls off quadratically with
distance from the center, then the source is sampled at the
back-rotated coordinate.
*/
factor=1.0-sqrt((double) distance)/radius;
sine=sin((double) (degrees*factor*factor));
cosine=cos((double) (degrees*factor*factor));
(void) InterpolateMagickPixelPacket(image,image_view,
UndefinedInterpolatePixel,(double) ((cosine*delta.x-sine*delta.y)/
scale.x+center.x),(double) ((sine*delta.x+cosine*delta.y)/scale.y+
center.y),&pixel,exception);
SetPixelPacket(swirl_image,&pixel,q,swirl_indexes+x);
}
q++;
}
if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SwirlImage)
#endif
proceed=SetImageProgress(image,SwirlImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
swirl_view=DestroyCacheView(swirl_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
swirl_image=DestroyImage(swirl_image);
return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *opacity,
% const PixelPacket tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: a geometry string (e.g. "50" or "40,60,80") giving the percent
% strength of the tint per channel; despite its name this is not a color
% value (it is parsed with ParseGeometry()).
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *opacity,
const PixelPacket tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"
CacheView
*image_view,
*tint_view;
GeometryInfo
geometry_info;
Image
*tint_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
color_vector,
pixel;
MagickStatusType
flags;
ssize_t
y;
/*
Allocate tint image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (tint_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(tint_image,DirectClass) == MagickFalse)
{
InheritException(exception,&tint_image->exception);
tint_image=DestroyImage(tint_image);
return((Image *) NULL);
}
/* no tint strength given: return the untinted clone as-is */
if (opacity == (const char *) NULL)
return(tint_image);
/*
Determine RGB values of the color.
*/
/* rho/sigma/xi/psi give the red/green/blue/opacity tint percentages; any
channel omitted from the geometry string inherits the red (rho) value */
flags=ParseGeometry(opacity,&geometry_info);
pixel.red=geometry_info.rho;
if ((flags & SigmaValue) != 0)
pixel.green=geometry_info.sigma;
else
pixel.green=pixel.red;
if ((flags & XiValue) != 0)
pixel.blue=geometry_info.xi;
else
pixel.blue=pixel.red;
if ((flags & PsiValue) != 0)
pixel.opacity=geometry_info.psi;
else
pixel.opacity=(MagickRealType) OpaqueOpacity;
/* signed per-channel offset: the requested percentage of the tint color,
recentered about the tint's overall intensity */
color_vector.red=(MagickRealType) (pixel.red*tint.red/100.0-
PixelIntensity(&tint));
color_vector.green=(MagickRealType) (pixel.green*tint.green/100.0-
PixelIntensity(&tint));
color_vector.blue=(MagickRealType) (pixel.blue*tint.blue/100.0-
PixelIntensity(&tint));
/*
Tint image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
tint_view=AcquireCacheView(tint_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
x;
register PixelPacket
*restrict q;
/* another row already failed: skip the remaining work */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickPixelPacket
pixel;
MagickRealType
weight;
/* weighting function f(x)=(1-(4.0*((x-0.5)*(x-0.5)))): full strength at
the midtones (x=0.5), fading to zero at pure black or white; only the
channels written below are read by the Clamp*PixelComponent macros */
weight=QuantumScale*p->red-0.5;
pixel.red=(MagickRealType) p->red+color_vector.red*(1.0-(4.0*
(weight*weight)));
SetRedPixelComponent(q,ClampRedPixelComponent(&pixel));
weight=QuantumScale*p->green-0.5;
pixel.green=(MagickRealType) p->green+color_vector.green*(1.0-(4.0*
(weight*weight)));
SetGreenPixelComponent(q,ClampGreenPixelComponent(&pixel));
weight=QuantumScale*p->blue-0.5;
pixel.blue=(MagickRealType) p->blue+color_vector.blue*(1.0-(4.0*
(weight*weight)));
SetBluePixelComponent(q,ClampBluePixelComponent(&pixel));
/* the opacity channel passes through untinted */
SetOpacityPixelComponent(q,GetOpacityPixelComponent(p));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* serialize the shared progress counter when compiled with OpenMP */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TintImage)
#endif
proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
tint_view=DestroyCacheView(tint_view);
image_view=DestroyCacheView(image_view);
/* on any failure, free the partial result and hand back NULL */
if (status == MagickFalse)
tint_image=DestroyImage(tint_image);
return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
char
primitive[MaxTextExtent];
DrawInfo
*draw_info;
Image
*blurred,
*canvas,
*oval,
*vignette;
/*
Validate the arguments.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/*
Work on a DirectClass copy of the source with its alpha channel enabled.
*/
canvas=CloneImage(image,0,0,MagickTrue,exception);
if (canvas == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(canvas,DirectClass) == MagickFalse)
{
InheritException(exception,&canvas->exception);
canvas=DestroyImage(canvas);
return((Image *) NULL);
}
canvas->matte=MagickTrue;
/*
Render a white ellipse on a black field; this becomes the vignette mask.
*/
oval=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,exception);
if (oval == (Image *) NULL)
{
canvas=DestroyImage(canvas);
return((Image *) NULL);
}
(void) QueryColorDatabase("#000000",&oval->background_color,exception);
(void) SetImageBackgroundColor(oval);
draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
(void) QueryColorDatabase("#ffffff",&draw_info->fill,exception);
(void) QueryColorDatabase("#ffffff",&draw_info->stroke,exception);
(void) FormatMagickString(primitive,MaxTextExtent,
"ellipse %g,%g,%g,%g,0.0,360.0",image->columns/2.0,
image->rows/2.0,image->columns/2.0-x,image->rows/2.0-y);
draw_info->primitive=AcquireString(primitive);
(void) DrawImage(oval,draw_info);
draw_info=DestroyDrawInfo(draw_info);
/*
Soften the mask's edge with a Gaussian blur.
*/
blurred=BlurImage(oval,radius,sigma,exception);
oval=DestroyImage(oval);
if (blurred == (Image *) NULL)
{
canvas=DestroyImage(canvas);
return((Image *) NULL);
}
blurred->matte=MagickFalse;
/*
Copy the blurred mask into the canvas opacity channel, then flatten
against the background color to produce the final vignette.
*/
(void) CompositeImage(canvas,CopyOpacityCompositeOp,blurred,0,0);
blurred=DestroyImage(blurred);
vignette=MergeImageLayers(canvas,FlattenLayer,exception);
canvas=DestroyImage(canvas);
return(vignette);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
const double wave_length,ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"
CacheView
*image_view,
*wave_view;
Image
*wave_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
MagickRealType
*sine_map;
register ssize_t
i;
ssize_t
y;
/*
Initialize wave image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* the canvas is 2*|amplitude| rows taller so the wave can displace pixels
both up and down without clipping */
wave_image=CloneImage(image,image->columns,(size_t) (image->rows+2.0*
fabs(amplitude)),MagickTrue,exception);
if (wave_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(wave_image,DirectClass) == MagickFalse)
{
InheritException(exception,&wave_image->exception);
wave_image=DestroyImage(wave_image);
return((Image *) NULL);
}
/* a translucent background needs an alpha channel to composite correctly */
if (wave_image->background_color.opacity != OpaqueOpacity)
wave_image->matte=MagickTrue;
/*
Allocate sine map.
*/
sine_map=(MagickRealType *) AcquireQuantumMemory((size_t) wave_image->columns,
sizeof(*sine_map));
if (sine_map == (MagickRealType *) NULL)
{
wave_image=DestroyImage(wave_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/* sine_map[i] = vertical source offset for column i, in [0, 2*|amplitude|] */
for (i=0; i < (ssize_t) wave_image->columns; i++)
sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
wave_length));
/*
Wave image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(wave_image,&zero);
image_view=AcquireCacheView(image);
wave_view=AcquireCacheView(wave_image);
/* reads that fall outside the source rows resolve to the background color */
(void) SetCacheViewVirtualPixelMethod(image_view,
BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) wave_image->rows; y++)
{
MagickPixelPacket
pixel;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
/* another row already failed: skip the remaining work */
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(wave_view);
pixel=zero;
for (x=0; x < (ssize_t) wave_image->columns; x++)
{
/* sample the source at (x, y-sine_map[x]); interpolation smooths the
non-integer vertical offset */
(void) InterpolateMagickPixelPacket(image,image_view,
UndefinedInterpolatePixel,(double) x,(double) (y-sine_map[x]),&pixel,
exception);
SetPixelPacket(wave_image,&pixel,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* serialize the shared progress counter when compiled with OpenMP */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_WaveImage)
#endif
proceed=SetImageProgress(image,WaveImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
wave_view=DestroyCacheView(wave_view);
image_view=DestroyCacheView(image_view);
sine_map=(MagickRealType *) RelinquishMagickMemory(sine_map);
/* on any failure, free the partial result and hand back NULL */
if (status == MagickFalse)
wave_image=DestroyImage(wave_image);
return(wave_image);
}
|
GB_binop__isgt_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_fp32)
// A*D function (colscale): GB (_AxD__isgt_fp32)
// D*A function (rowscale): GB (_DxB__isgt_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_fp32)
// C=scalar+B GB (_bind1st__isgt_fp32)
// C=scalar+B' GB (_bind1st_tran__isgt_fp32)
// C=A+scalar GB (_bind2nd__isgt_fp32)
// C=A'+scalar GB (_bind2nd_tran__isgt_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (aij > bij)
// operand/result types for ISGT_FP32: all float (ISGT yields 1 or 0 as float)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
// access the (p)th entry of the output array Cx
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_FP32 || GxB_NO_ISGT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGT is not in that list, so this dense-accum kernel is compiled out
// and the generated name is the placeholder "(none)".
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// kernel compiled out (see GB_DISABLE); caller falls back to the generic case
return (GrB_NO_VALUE) ;
#else
// C = A+B where C, A, and B are all dense; the template applies GB_BINOP
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// C += B: B's entries (pre-sliced into B_ntasks parallel tasks by
// B_ek_slicing) are accumulated into the dense matrix C via GB_BINOP
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isgt_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
// accumulate the scalar into every entry of the dense matrix C via the
// subassign template (cij = GB_BINOP (cij, b))
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// note: the generator emitted a second, unreachable "return (GrB_SUCCESS) ;"
// after the block above; it has been removed (the inner return is
// unconditional, so no code path reached it)
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = A*D: scale each column j of A by the diagonal entry D(j,j)
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = D*B: scale each row i of B by the diagonal entry D(i,i)
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isgt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix workspaces for slicing M, A, and B into parallel tasks;
// released by GB_FREE_WORK after the template completes
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isgt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = A.*B: the meta-template dispatches on ewise_method, the mask, and
// the requested sparsity of C
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// A is sparse/hyper and B is bitmap/full; flip handling is resolved at
// compile time via GB_BINOP_FLIP (0 for this operator)
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C<M> = A.*B with M sparse/hyper and both A and B bitmap/full; work is
// sliced over M's entries (M_ek_slicing)
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isgt_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C is held as a bitmap; the template handles C=A.*B, C<M>=A.*B, and
// C<!M>=A.*B
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isgt_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = (x > Bx [p]) for every entry present in B, with the scalar x
// bound to the operator's first argument
float *Cz = (float *) Cx_output ;
const float xval = (*((float *) x_input)) ;
const float *Bvals = (float *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip positions not present in the bitmap of B
if (!GBB (Bb, k)) continue ;
Cz [k] = (xval > Bvals [k]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isgt_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = (Ax [p] > y) for every entry present in A, with the scalar y
// bound to the operator's second argument
float *Cz = (float *) Cx_output ;
const float *Avals = (float *) Ax_input ;
const float yval = (*((float *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip positions not present in the bitmap of A
if (!GBB (Ab, k)) continue ;
Cz [k] = (Avals [k] > yval) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__isgt_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = op (x, A'): transpose A while applying z = (x > aij) to each entry
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated boilerplate: restore GB_ATYPE for code that follows (here the
// x and aij types coincide, so the value is unchanged)
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = op (A', y): transpose A while applying z = (aij > y) to each entry
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_int64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int64_uint32
// op(A') function: GB_tran__minv_int64_uint32
// C type: int64_t
// A type: uint32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)
// A is uint32_t; C is int64_t; each aij is typecast to int64_t before MINV
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// access the (p)th entry of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 64) ;
// casting
#define GB_CASTING(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_int64_uint32
(
int64_t *Cx, // Cx and Ax may be aliased
uint32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// Cx [p] = minv ((int64_t) Ax [p]) for all anz entries, in parallel
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_int64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// numeric (second) phase of the transpose; Rowcounts, Iter, and A_slice
// come from the symbolic first phase
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
interpolation_pc.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
static inline void InterpolateBlock_PC(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
// interpolate 3D array from read_i,j,k of read[] to write_i,j,k in write[]
// (piecewise constant: each coarse cell's value is copied to its 2x2x2 fine children)
int dim_i = block->dim.i<<1; // calculate the dimensions of the resultant fine block
int dim_j = block->dim.j<<1;
int dim_k = block->dim.k<<1;
int read_i = block->read.i;
int read_j = block->read.j;
int read_k = block->read.k;
int read_jStride = block->read.jStride;
int read_kStride = block->read.kStride;
int write_i = block->write.i;
int write_j = block->write.j;
int write_k = block->write.k;
int write_kStride = block->write.kStride;
int write_jStride = block->write.jStride;
double * __restrict__ read = block->read.ptr;
double * __restrict__ write = block->write.ptr;
// a box index >= 0 means this endpoint is a local box rather than an MPI
// buffer: point at the box's vector data (offset past the ghost zone) and
// adopt the box's strides
if(block->read.box >=0){
read = level_c->my_boxes[ block->read.box].vectors[id_c] + level_c->my_boxes[ block->read.box].ghosts*(1+level_c->my_boxes[ block->read.box].jStride+level_c->my_boxes[ block->read.box].kStride);
read_jStride = level_c->my_boxes[block->read.box ].jStride;
read_kStride = level_c->my_boxes[block->read.box ].kStride;
}
if(block->write.box>=0){
write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->my_boxes[block->write.box].ghosts*(1+level_f->my_boxes[block->write.box].jStride+level_f->my_boxes[block->write.box].kStride);
write_jStride = level_f->my_boxes[block->write.box].jStride;
write_kStride = level_f->my_boxes[block->write.box].kStride;
}
int i,j,k;
for(k=0;k<dim_k;k++){
for(j=0;j<dim_j;j++){
for(i=0;i<dim_i;i++){
// i>>1,j>>1,k>>1 maps each fine-grid index back to its coarse parent cell
int write_ijk = ((i )+write_i) + (((j )+write_j)*write_jStride) + (((k )+write_k)*write_kStride);
int read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
write[write_ijk] = prescale_f*write[write_ijk] + read[read_ijk]; // CAREFUL !!! you must guarantee you zero'd the MPI buffers(write[]) and destination boxes at some point to avoid 0.0*NaN or 0.0*inf
}}}
}
//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) piecewise constant interpolation
void interpolation_pc(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
uint64_t _timeCommunicationStart = CycleTime();
uint64_t _timeStart,_timeEnd;
// NOTE(fix): every hclib lambda below now captures the enclosing locals by
// reference ([&]); the previous empty capture lists [] left level_f/level_c/
// id_f/id_c/prescale_f/recv_requests/send_requests unbound, which does not
// compile in C++.  By-reference capture is safe because hclib::finish blocks
// until all forasync iterations complete.  The unused file-style locals
// "int buffer" and "int n" were removed: each forasync owns its own index.
#ifdef USE_MPI
// by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
MPI_Request *recv_requests = level_f->interpolation.requests;
MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;
// loop through packed list of MPI receives and prepost Irecv's...
_timeStart = CycleTime();
hclib::finish([&] {
hclib::loop_domain_1d loop(level_f->interpolation.num_recvs);
hclib::forasync(&loop, [&] (int n) {
hclib::MPI_Irecv(level_f->interpolation.recv_buffers[n],
level_f->interpolation.recv_sizes[n],
MPI_DOUBLE,
level_f->interpolation.recv_ranks[n],
6, // by convention, piecewise constant interpolation uses tag=6
MPI_COMM_WORLD,
&recv_requests[n]
);
});
});
_timeEnd = CycleTime();
level_f->cycles.interpolation_recv += (_timeEnd-_timeStart);
// pack MPI send buffers...
_timeStart = CycleTime();
// #pragma omp parallel for private(buffer) if(level_c->interpolation.num_blocks[0]>1) schedule(static,1)
hclib::finish([&] {
hclib::loop_domain_1d loop(level_c->interpolation.num_blocks[0]);
hclib::forasync(&loop, [&] (int buffer) {
// prescale of 0.0: send buffers are overwritten, not accumulated into
InterpolateBlock_PC(level_f,id_f,0.0,level_c,id_c,
&level_c->interpolation.blocks[0][buffer]);
}, level_c->interpolation.num_blocks[0] <= 1);
});
_timeEnd = CycleTime();
level_f->cycles.interpolation_pack += (_timeEnd-_timeStart);
// loop through MPI send buffers and post Isend's...
_timeStart = CycleTime();
// #pragma omp parallel for schedule(dynamic,1)
hclib::finish([&] {
hclib::loop_domain_1d loop(level_c->interpolation.num_sends);
hclib::forasync(&loop, [&] (int n) {
hclib::MPI_Isend(level_c->interpolation.send_buffers[n],
level_c->interpolation.send_sizes[n],
MPI_DOUBLE,
level_c->interpolation.send_ranks[n],
6, // by convention, piecewise constant interpolation uses tag=6
MPI_COMM_WORLD,
&send_requests[n]
);
});
});
_timeEnd = CycleTime();
level_f->cycles.interpolation_send += (_timeEnd-_timeStart);
#endif
// perform local interpolation... try and hide within Isend latency...
_timeStart = CycleTime();
// #pragma omp parallel for private(buffer) if(level_c->interpolation.num_blocks[1]>1) schedule(static,1)
hclib::finish([&] {
hclib::loop_domain_1d loop(level_c->interpolation.num_blocks[1]);
hclib::forasync(&loop, [&] (int buffer) {
InterpolateBlock_PC(level_f,id_f,prescale_f,level_c,id_c,
&level_c->interpolation.blocks[1][buffer]);
}, level_c->interpolation.num_blocks[1]<=1);
});
_timeEnd = CycleTime();
level_f->cycles.interpolation_local += (_timeEnd-_timeStart);
// wait for MPI to finish...
#ifdef USE_MPI
_timeStart = CycleTime();
if(nMessages)hclib::MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
_timeEnd = CycleTime();
level_f->cycles.interpolation_wait += (_timeEnd-_timeStart);
// unpack MPI receive buffers
_timeStart = CycleTime();
// #pragma omp parallel for private(buffer) if(level_f->interpolation.num_blocks[2]>1) schedule(static,1)
hclib::finish([&] {
hclib::loop_domain_1d loop(level_f->interpolation.num_blocks[2]);
hclib::forasync(&loop, [&] (int buffer) {
IncrementBlock(level_f,id_f,prescale_f,
&level_f->interpolation.blocks[2][buffer]);
}, level_f->interpolation.num_blocks[2] <= 1);
});
_timeEnd = CycleTime();
level_f->cycles.interpolation_unpack += (_timeEnd-_timeStart);
#endif
level_f->cycles.interpolation_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
|
wshfl.c | /* Copyright 2018-2019. Massachusetts Institute of Technology.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2018-2019 Siddharth Iyer <ssi@mit.edu>
*
* Tamir J, Uecker M, Chen W, Lai P, Alley MT, Vasanawala SS, Lustig M.
* T2 shuffling: Sharp, multicontrast, volumetric fast spin‐echo imaging.
* Magnetic resonance in medicine. 2017 Jan 1;77(1):180-95.
*
* B Bilgic, BA Gagoski, SF Cauley, AP Fan, JR Polimeni, PE Grant,
* LL Wald, and K Setsompop, Wave-CAIPI for highly accelerated 3D
* imaging. Magn Reson Med (2014) doi: 10.1002/mrm.25347
*
* Iyer S, Bilgic B, Setsompop K.
* Faster T2 shuffling with Wave.
* Presented in the session: "Signal Encoding and Decoding" at ISMRM 2018.
* https://www.ismrm.org/18/program_files/O67.htm
*/
#include <stdbool.h>
#include <complex.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/fft.h"
#include "num/init.h"
#include "num/iovec.h"
#include "num/ops.h"
#include "num/ops_p.h"
#ifdef USE_CUDA
#include "num/gpuops.h"
#endif
#include "iter/iter.h"
#include "iter/lsqr.h"
#include "iter/misc.h"
#include "linops/linop.h"
#include "linops/fmac.h"
#include "linops/someops.h"
#include "linops/decompose_complex.h"
#include "misc/debug.h"
#include "misc/mri.h"
#include "misc/utils.h"
#include "misc/mmio.h"
#include "misc/misc.h"
#include "misc/opts.h"
#include "wavelet/wavthresh.h"
#include "lowrank/lrthresh.h"
#include "grecon/optreg.h"
#include "grecon/italgo.h"
static const char usage_str[] = "<maps> <wave> <phi> <reorder> <table> <output>";
static const char help_str[] =
"Perform a wave-shuffling reconstruction.\n\n"
"Conventions:\n"
" * (sx, sy, sz) - Spatial dimensions.\n"
" * wx - Extended FOV in READ_DIM due to\n"
" wave's voxel spreading.\n"
" * (nc, md) - Number of channels and ESPIRiT's \n"
" extended-SENSE model operator\n"
" dimensions (or # of maps).\n"
" * (tf, tk) - Turbo-factor and the rank\n"
" of the temporal basis used in\n"
" shuffling.\n"
" * ntr - Number of TRs, or the number of\n"
" (ky, kz) points acquired of one\n"
" echo image.\n"
" * n - Total number of (ky, kz) points\n"
" acquired. This is equal to the\n"
" product of ntr and tf.\n\n"
"Descriptions:\n"
" * reorder is an (n by 3) index matrix such that\n"
" [ky, kz, t] = reorder(i, :) represents the\n"
" (ky, kz) kspace position of the readout line\n"
" acquired at echo number (t), and 0 <= ky < sy,\n"
" 0 <= kz < sz, 0 <= t < tf).\n"
" * table is a (wx by nc by n) matrix such that\n"
" table(:, :, k) represents the kth multichannel\n"
" kspace line.\n\n"
"Expected dimensions:\n"
" * maps - ( sx, sy, sz, nc, md, 1, 1)\n"
" * wave - ( wx, sy, sz, 1, 1, 1, 1)\n"
" * phi - ( 1, 1, 1, 1, 1, tf, tk)\n"
" * output - ( sx, sy, sz, 1, md, 1, tk)\n"
" * reorder - ( n, 3, 1, 1, 1, 1, 1)\n"
" * table - ( wx, nc, n, 1, 1, 1, 1)";
/* Log the domain and codomain dimensions of a linear operator at DP_INFO. */
static void print_opdims(const struct linop_s* op)
{
	const struct iovec_s* dom = linop_domain(op);
	const struct iovec_s* cod = linop_codomain(op);

	debug_printf(DP_INFO, "\tDomain: [");
	for (long i = 0; i < dom->N; i++)
		debug_printf(DP_INFO, "%6ld", dom->dims[i]);
	debug_printf(DP_INFO, "]\n");

	debug_printf(DP_INFO, "\tCodomain: [");
	for (long i = 0; i < cod->N; i++)
		debug_printf(DP_INFO, "%6ld", cod->dims[i]);
	debug_printf(DP_INFO, "]\n");
}
/* Build the (1, sy, sz, 1, 1, tf) sampling mask from the (n x 3) reorder
 * table: each row (ky, kz, t) marks one acquired phase-encode location
 * at echo t. */
static void construct_mask(
	long reorder_dims[DIMS], complex float* reorder,
	long mask_dims[DIMS], complex float* mask)
{
	long n = reorder_dims[0];
	long sy = mask_dims[1];
	long sz = mask_dims[2];

	for (long i = 0; i < n; i++) {

		long ky = lround(creal(reorder[i]));
		long kz = lround(creal(reorder[i + n]));
		long ec = lround(creal(reorder[i + 2 * n]));

		mask[ky + sy * (kz + sz * ec)] = 1;
	}
}
/* Linop data for the temporal-projection kernel K (coefficient-kspace
 * to/from the data table). The reorder, phi and kernel buffers are
 * borrowed; only the dimension arrays (and the optional GPU kernel
 * copy) are owned and released in kern_free. */
struct kern_s {

	INTERFACE(linop_data_t);

	// NOTE(review): N appears unused within this file -- confirm before removing.
	unsigned int N;

	long* reorder_dims; // Dimension of the index table:    ( n,  3,  1, 1, 1,  1,  1,  1)
	long* phi_dims;     // Dimension of the temporal basis: ( 1,  1,  1, 1, 1, tf, tk,  1)
	long* table_dims;   // Dimension of the data table:     (wx, nc,  n, 1, 1,  1,  1,  1)
	long* kernel_dims;  // Dimension of the kernel:         ( 1, sy, sz, 1, 1,  1, tk, tk)

	complex float* reorder;    // (ky, kz, echo) index columns, column-major.
	complex float* phi;        // Temporal basis.
	complex float* kernel;     // Sampling-temporal kernel (CPU copy).
	complex float* gpu_kernel; // Kernel replicated over (wx, nc) on the GPU, or NULL.
};
static DEF_TYPEID(kern_s);
/* Forward kernel K: go to table from coefficient-kspace with memory
 * efficiency (one readout line at a time).
 *
 * src: (wx, sy, sz, nc, 1, 1, tk) coefficient-kspace.
 * dst: (wx, nc, n) data table; line i is the readout acquired at the
 *      (ky, kz, t) position given by row i of the reorder table.
 */
static void kern_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct kern_s* data = CAST_DOWN(kern_s, _data);

	// Problem sizes (see kern_s for the dimension conventions).
	long wx = data->table_dims[0];
	long sy = data->kernel_dims[1];
	long sz = data->kernel_dims[2];
	long nc = data->table_dims[1];
	long n = data->reorder_dims[0];
	long tf = data->phi_dims[5];
	long tk = data->phi_dims[6];

	long input_dims[] = { [0 ... DIMS - 1] = 1 };
	input_dims[0] = wx;
	input_dims[1] = sy;
	input_dims[2] = sz;
	input_dims[3] = nc;
	input_dims[6] = tk;

	// Permute so that each (ky, kz) location owns one contiguous
	// (wx, nc, tk) chunk, which the per-line expansion below indexes.
	long perm_dims[] = { [0 ... DIMS - 1] = 1 };
	perm_dims[0] = wx;
	perm_dims[1] = nc;
	perm_dims[3] = tk;
	perm_dims[4] = sy;
	perm_dims[5] = sz;

	complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, src);

	unsigned int permute_order[DIMS] = {0, 3, 5, 6, 1, 2, 4, 7};
	for (unsigned int i = 8; i < DIMS; i++)
		permute_order[i] = i;

	md_permute(DIMS, permute_order, perm_dims, perm, input_dims, src, CFL_SIZE);

	// 4D scratch layout for expanding tk coefficients into tf echoes via phi.
	long vec_dims[]     = {wx, nc, tf,  1};
	long phi_mat_dims[] = { 1,  1, tf, tk};
	long phi_in_dims[]  = {wx, nc,  1, tk};
	long fmac_dims[]    = {wx, nc, tf, tk};
	long line_dims[]    = {wx, nc,  1,  1};

	complex float* vec = md_alloc_sameplace(4, vec_dims, CFL_SIZE, src);

	long vec_str[4];
	md_calc_strides(4, vec_str, vec_dims, CFL_SIZE);
	long phi_mat_str[4];
	md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE);
	long phi_in_str[4];
	md_calc_strides(4, phi_in_str, phi_in_dims, CFL_SIZE);
	long fmac_str[4];
	md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE);

	int y = -1;
	int z = -1;
	int t = -1;

	// For every acquired line: expand that (ky, kz) chunk over the full
	// echo train (vec = chunk * phi), then copy out echo t only.
	for (int i = 0; i < n; i ++) {

		y = lround(creal(data->reorder[i]));
		z = lround(creal(data->reorder[i + n]));
		t = lround(creal(data->reorder[i + 2 * n]));

		md_clear(4, vec_dims, vec, CFL_SIZE);
		md_zfmac2(4, fmac_dims, vec_str, vec, phi_in_str, (perm + ((wx * nc * tk) * (y + z * sy))), phi_mat_str, data->phi);
		md_copy(4, line_dims, dst + (i * wx * nc), vec + (t * wx * nc), CFL_SIZE);
	}

	md_free(perm);
	md_free(vec);
}
/* Adjoint kernel K^H: collapse the data table into the temporal basis
 * with memory efficiency.
 *
 * src: (wx, nc, n) data table.
 * dst: (wx, sy, sz, nc, 1, 1, tk) coefficient-kspace.
 *
 * Fix: the previous version both read and wrote flags[] inside the
 * OpenMP parallel loop; two threads could observe flags[k] == 0 for
 * readouts of the same (ky, kz) group and accumulate that group into
 * perm twice (a data race). The group leaders are now marked in a
 * serial pass, so flags is strictly read-only in the parallel region
 * and distinct leaders write disjoint slices of perm.
 */
static void kern_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	struct kern_s* data = CAST_DOWN(kern_s, _data);

	long wx = data->table_dims[0];
	long sy = data->kernel_dims[1];
	long sz = data->kernel_dims[2];
	long nc = data->table_dims[1];
	long n = data->reorder_dims[0];
	long tf = data->phi_dims[5];
	long tk = data->phi_dims[6];

	// Accumulator in permuted layout: one (wx, nc, tk) chunk per (ky, kz).
	long perm_dims[] = { [0 ... DIMS - 1] = 1 };
	perm_dims[0] = wx;
	perm_dims[1] = nc;
	perm_dims[3] = tk;
	perm_dims[4] = sy;
	perm_dims[5] = sz;

	complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, dst);
	md_clear(DIMS, perm_dims, perm, CFL_SIZE);

#ifdef _OPENMP
	long num_threads = omp_get_max_threads();
#else
	long num_threads = 1;
#endif

	long vec_dims[]     = {wx, nc, tf,  1};
	long phi_mat_dims[] = { 1,  1, tf, tk};
	long phi_out_dims[] = {wx, nc,  1, tk};
	long fmac_dims[]    = {wx, nc, tf, tk};
	long line_dims[]    = {wx, nc,  1,  1};

	// Per-thread scratch vectors for gathering one echo train.
	long vthrd_dims[] = {wx, nc, tf, 1, num_threads};
	complex float* vec = md_alloc_sameplace(5, vthrd_dims, CFL_SIZE, dst);
	md_clear(5, vthrd_dims, vec, CFL_SIZE);

	long vec_str[4];
	md_calc_strides(4, vec_str, vec_dims, CFL_SIZE);
	long phi_mat_str[4];
	md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE);
	long phi_out_str[4];
	md_calc_strides(4, phi_out_str, phi_out_dims, CFL_SIZE);
	long fmac_str[4];
	md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE);

	long flag_dims[1] = { n };
	complex float* flags = md_calloc(1, flag_dims, CFL_SIZE);

	// Serial marking pass: flags[k] stays 0 only for the first readout of
	// each (ky, kz) group (the "leader"); all later members are marked 1.
	for (long k = 0; k < n; k++) {

		if (0. != creal(flags[k]))
			continue;

		long y = lround(creal(data->reorder[k]));
		long z = lround(creal(data->reorder[k + n]));

		for (long i = k + 1; i < n; i++)
			if ((y == lround(creal(data->reorder[i]))) && (z == lround(creal(data->reorder[i + n]))))
				flags[i] = 1;
	}

	// Parallel accumulation: each leader gathers all echoes of its
	// (ky, kz) group into per-thread scratch and projects onto the basis.
#pragma omp parallel for
	for (int k = 0; k < n; k ++) {

#ifdef _OPENMP
		int tid = omp_get_thread_num();
#else
		int tid = 0;
#endif
		if (0. != creal(flags[k]))	// not a group leader
			continue;

		int y = lround(creal(data->reorder[k]));
		int z = lround(creal(data->reorder[k + n]));

		md_clear(4, vec_dims, vec + (wx * nc * tf * tid), CFL_SIZE);

		for (int i = k; i < n; i ++) {

			if ((y == lround(creal(data->reorder[i]))) && (z == lround(creal(data->reorder[i + n])))) {

				int t = lround(creal(data->reorder[i + 2 * n]));
				md_copy(4, line_dims, (vec + (wx * nc * tf * tid) + t * wx * nc), (src + i * wx * nc), CFL_SIZE);
			}
		}

		// perm chunk (y, z) += phi^H applied to the gathered echo train.
		md_zfmacc2(4, fmac_dims, phi_out_str, perm + (y + z * sy) * (wx * nc * tk), vec_str, vec + (wx * nc * tf * tid), phi_mat_str, data->phi);
	}

	// Permute the accumulator back to (wx, sy, sz, nc, 1, 1, tk).
	long out_dims[] = { [0 ... DIMS - 1] = 1 };
	out_dims[0] = wx;
	out_dims[1] = sy;
	out_dims[2] = sz;
	out_dims[3] = nc;
	out_dims[6] = tk;

	unsigned int permute_order[DIMS] = {0, 4, 5, 1, 6, 2, 3, 7};
	for (unsigned int i = 8; i < DIMS; i++)
		permute_order[i] = i;

	md_permute(DIMS, permute_order, out_dims, dst, perm_dims, perm, CFL_SIZE);

	md_free(vec);
	md_free(perm);
	md_free(flags);
}
/* Normal operator K^H K implemented as a pointwise multiply with the
 * precomputed (sy, sz, tk, tk) sampling-temporal kernel: at each voxel the
 * tk input coefficients (dim 6) are mixed into tk outputs (dim 7). */
static void kern_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct kern_s* data = CAST_DOWN(kern_s, _data);

	long wx = data->table_dims[0];
	long sy = data->kernel_dims[1];
	long sz = data->kernel_dims[2];
	long nc = data->table_dims[1];
	long tk = data->phi_dims[6];

	long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
	input_dims[0] = wx;
	input_dims[1] = sy;
	input_dims[2] = sz;
	input_dims[3] = nc;
	input_dims[6] = tk;

	long input_str[DIMS];
	md_calc_strides(DIMS, input_str, input_dims, CFL_SIZE);

	// Output moves the coefficient index to dim 7 so the fmac below
	// sums over the input coefficient dimension (dim 6).
	long output_dims[DIMS];
	md_copy_dims(DIMS, output_dims, input_dims);
	output_dims[6] = 1;
	output_dims[7] = tk;

	long output_str[DIMS];
	md_calc_strides(DIMS, output_str, output_dims, CFL_SIZE);

	// On GPU, use the kernel replicated over (wx, nc); see linop_kern_create.
	// NOTE(review): presumably replication avoids stride-0 broadcast access
	// on the GPU -- confirm with the original authors.
	long gpu_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, gpu_kernel_dims, data->kernel_dims);
	gpu_kernel_dims[0] = wx;
	gpu_kernel_dims[3] = nc;

	long kernel_str[DIMS];
	md_calc_strides(DIMS, kernel_str, data->kernel_dims, CFL_SIZE);
	long gpu_kernel_str[DIMS];
	md_calc_strides(DIMS, gpu_kernel_str, gpu_kernel_dims, CFL_SIZE);

	long fmac_dims[DIMS];
	md_merge_dims(DIMS, fmac_dims, input_dims, data->kernel_dims);

	md_clear(DIMS, output_dims, dst, CFL_SIZE);

#ifdef USE_CUDA
	if(cuda_ondevice(src))
		md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, gpu_kernel_str, data->gpu_kernel);
	else
#endif
	md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, kernel_str, data->kernel);
}
/* Release resources owned by the kernel linop data. Only the dimension
 * arrays and the optional GPU kernel copy are owned; the reorder, phi
 * and kernel buffers belong to the caller. */
static void kern_free(const linop_data_t* _data)
{
	const struct kern_s* data = CAST_DOWN(kern_s, _data);

#ifdef USE_CUDA
	if (NULL != data->gpu_kernel)
		md_free(data->gpu_kernel);
#endif

	xfree(data->kernel_dims);
	xfree(data->table_dims);
	xfree(data->phi_dims);
	xfree(data->reorder_dims);

	xfree(data);
}
/* Create the temporal-projection linop K.
 *
 * Forward:  (wx, sy, sz, nc, 1, 1, tk) coefficient-kspace -> (wx, nc, n) table.
 * Adjoint:  data table -> coefficient-kspace.
 * Normal:   pointwise multiply with the precomputed sampling-temporal kernel.
 *
 * reorder/phi/kernel are borrowed (not copied, not freed by kern_free);
 * the dimension arrays are copied and owned by the linop. On GPU, the
 * kernel is additionally replicated over (wx, nc) and moved to the device.
 */
static const struct linop_s* linop_kern_create(bool gpu_flag,
	const long _reorder_dims[DIMS], complex float* reorder,
	const long _phi_dims[DIMS], complex float* phi,
	const long _kernel_dims[DIMS], complex float* kernel,
	const long _table_dims[DIMS])
{
	PTR_ALLOC(struct kern_s, data);
	SET_TYPEID(kern_s, data);

	// Own copies of all dimension arrays (freed in kern_free).
	PTR_ALLOC(long[DIMS], reorder_dims);
	PTR_ALLOC(long[DIMS], phi_dims);
	PTR_ALLOC(long[DIMS], table_dims);
	PTR_ALLOC(long[DIMS], kernel_dims);

	md_copy_dims(DIMS, *reorder_dims, _reorder_dims);
	md_copy_dims(DIMS, *phi_dims, _phi_dims);
	md_copy_dims(DIMS, *table_dims, _table_dims);
	md_copy_dims(DIMS, *kernel_dims, _kernel_dims);

	data->reorder_dims = *PTR_PASS(reorder_dims);
	data->phi_dims = *PTR_PASS(phi_dims);
	data->table_dims = *PTR_PASS(table_dims);
	data->kernel_dims = *PTR_PASS(kernel_dims);

	data->reorder = reorder;
	data->phi = phi;
	data->kernel = kernel;
	data->gpu_kernel = NULL;

#ifdef USE_CUDA
	if(gpu_flag) {
		// Replicate the (1, sy, sz, ..., tk, tk) kernel over (wx, nc)
		// and move it to the GPU for kern_normal.
		long repmat_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
		md_copy_dims(DIMS, repmat_kernel_dims, _kernel_dims);
		repmat_kernel_dims[0] = _table_dims[0];
		repmat_kernel_dims[3] = _table_dims[1];

		long kernel_strs[DIMS];
		long repmat_kernel_strs[DIMS];
		md_calc_strides(DIMS, kernel_strs, _kernel_dims, CFL_SIZE);
		md_calc_strides(DIMS, repmat_kernel_strs, repmat_kernel_dims, CFL_SIZE);

		complex float* repmat_kernel = md_calloc(DIMS, repmat_kernel_dims, CFL_SIZE);
		md_copy2(DIMS, repmat_kernel_dims, repmat_kernel_strs, repmat_kernel, kernel_strs, kernel, CFL_SIZE);
		data->gpu_kernel = md_gpu_move(DIMS, repmat_kernel_dims, repmat_kernel, CFL_SIZE);
		md_free(repmat_kernel);
	}
#else
	UNUSED(gpu_flag);
#endif

	long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
	input_dims[0] = _table_dims[0];
	input_dims[1] = _kernel_dims[1];
	input_dims[2] = _kernel_dims[2];
	input_dims[3] = _table_dims[1];
	input_dims[6] = _phi_dims[6];

	long output_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
	output_dims[0] = _table_dims[0];
	output_dims[1] = _table_dims[1];
	output_dims[2] = _reorder_dims[0];

	const struct linop_s* K = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), kern_apply, kern_adjoint, kern_normal, NULL, kern_free);
	return K;
}
/* Linop data wrapping a single-channel operator into the full
 * multi-channel, multi-map (extended-SENSE) forward model. */
struct multc_s {

	INTERFACE(linop_data_t);

	unsigned int nc; // Number of receive channels.
	unsigned int md; // Number of ESPIRiT maps.

	const complex float* maps;   // (sx, sy, sz, nc, md) sensitivities (borrowed).
	const struct linop_s* sc_op; // Single channel operator.
};
static DEF_TYPEID(multc_s);
/* Forward multi-channel model: for each coil, weight the md-map coefficient
 * images by that coil's sensitivities, push the result through the
 * single-channel operator, and stack the per-coil tables into dst.
 *
 * src: (sx, sy, sz, 1, md, 1, tk) coefficient images.
 * dst: (wx, nc, n) multi-channel data table.
 */
static void multc_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);

	// Loading single channel operator.
	const struct operator_s* fwd = data->sc_op->forward;
	const long* sc_inp_dims = linop_domain(data->sc_op)->dims;
	const long* sc_out_dims = linop_codomain(data->sc_op)->dims;

	long sx = sc_inp_dims[0];
	long sy = sc_inp_dims[1];
	long sz = sc_inp_dims[2];
	long wx = sc_out_dims[0];
	long n = sc_out_dims[2];
	long nc = data->nc;
	long md = data->md;

	long src_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, src_dims, sc_inp_dims);
	src_dims[MAPS_DIM] = md;

	long dst_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, dst_dims, sc_out_dims);
	dst_dims[1] = nc;

	long map_dims[] = { [0 ... DIMS - 1] = 1};
	map_dims[0] = sx;
	map_dims[1] = sy;
	map_dims[2] = sz;
	map_dims[3] = nc;
	map_dims[4] = md;

	long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_map_dims, map_dims);
	single_map_dims[COIL_DIM] = 1;

	complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
	complex float* buffer = md_alloc_sameplace(DIMS, sc_inp_dims, CFL_SIZE, src);

	// Per-coil tables are collected as (wx, n, nc) and permuted to
	// (wx, nc, n) at the end.
	long tbl_dims[] = { [0 ... DIMS - 1] = 1};
	tbl_dims[0] = wx;
	tbl_dims[1] = n;
	tbl_dims[2] = nc;

	complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);
	md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);

	long pos[] = { [0 ... DIMS - 1] = 0 };

	long zfmac_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, zfmac_dims, src_dims);

	long strides_single_map[DIMS];
	md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
	long strides_src[DIMS];
	md_calc_strides(DIMS, strides_src, src_dims, CFL_SIZE);
	long strides_sc_inp[DIMS];
	md_calc_strides(DIMS, strides_sc_inp, sc_inp_dims, CFL_SIZE);

	for (long k = 0; k < data->nc; k++) {

		md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
		md_clear(DIMS, sc_inp_dims, buffer, CFL_SIZE);

		// Slice out coil k's maps, then sum over the map dimension:
		// buffer = sum_m src(..., m) * maps(..., k, m).
		pos[COIL_DIM] = k;
		md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
		pos[COIL_DIM] = 0;
		md_zfmac2(DIMS, zfmac_dims, strides_sc_inp, buffer, strides_src, src, strides_single_map, single_map);

		operator_apply(fwd, DIMS, sc_out_dims, tbl + (wx * n * k), DIMS, sc_inp_dims, buffer);
	}

	md_clear(DIMS, dst_dims, dst, CFL_SIZE);

	// (wx, n, nc) -> (wx, nc, n).
	unsigned int permute_order[DIMS] = {0, 2, 1};
	for (unsigned int i = 3; i < DIMS; i++)
		permute_order[i] = i;

	md_permute(DIMS, permute_order, dst_dims, dst, tbl_dims, tbl, CFL_SIZE);

	md_free(single_map);
	md_free(buffer);
	md_free(tbl);
}
/* Adjoint multi-channel model: for each coil, extract its lines from the
 * data table, apply the single-channel adjoint, and accumulate the
 * conjugate-map-weighted images over coils into the map dimension.
 *
 * src: (wx, nc, n) multi-channel data table.
 * dst: (sx, sy, sz, 1, md, 1, tk) coefficient images.
 */
static void multc_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);

	// Loading single channel operator.
	const struct operator_s* adj = data->sc_op->adjoint;
	const long* sc_inp_dims = linop_codomain(data->sc_op)->dims;
	const long* sc_out_dims = linop_domain(data->sc_op)->dims;

	long sx = sc_out_dims[0];
	long sy = sc_out_dims[1];
	long sz = sc_out_dims[2];
	long wx = sc_inp_dims[0];
	long n = sc_inp_dims[2];
	long nc = data->nc;
	long md = data->md;

	long src_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, src_dims, sc_inp_dims);
	src_dims[1] = nc;

	long dst_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, dst_dims, sc_out_dims);
	dst_dims[MAPS_DIM] = md;

	long map_dims[] = { [0 ... DIMS - 1] = 1};
	map_dims[0] = sx;
	map_dims[1] = sy;
	map_dims[2] = sz;
	map_dims[3] = nc;
	map_dims[4] = md;

	long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_map_dims, map_dims);
	single_map_dims[COIL_DIM] = 1;

	complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
	complex float* buffer1 = md_alloc_sameplace(DIMS, sc_out_dims, CFL_SIZE, src); // adjoint image for one coil
	complex float* buffer2 = md_alloc_sameplace(DIMS, dst_dims, CFL_SIZE, src);    // conjugate-weighted contribution

	// Single-coil slice of the table: (wx, 1, n).
	long tbl_dims[] = { [0 ... DIMS - 1] = 1};
	tbl_dims[0] = wx;
	tbl_dims[2] = n;

	complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);

	long pos[] = { [0 ... DIMS - 1] = 0 };

	long strides_single_map[DIMS];
	md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
	long strides_sc_out[DIMS];
	md_calc_strides(DIMS, strides_sc_out, sc_out_dims, CFL_SIZE);
	long strides_dst[DIMS];
	md_calc_strides(DIMS, strides_dst, dst_dims, CFL_SIZE);

	md_clear(DIMS, dst_dims, dst, CFL_SIZE);

	for (long k = 0; k < data->nc; k++) {

		md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
		md_clear(DIMS, sc_out_dims, buffer1, CFL_SIZE);
		md_clear(DIMS, dst_dims, buffer2, CFL_SIZE);
		md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);

		// Slice coil k from the table (flag 2 selects dim 1).
		pos[1] = k;
		md_slice(DIMS, 2, pos, src_dims, tbl, src, CFL_SIZE);
		pos[1] = 0;

		operator_apply(adj, DIMS, sc_out_dims, buffer1, DIMS, tbl_dims, tbl);

		pos[COIL_DIM] = k;
		md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
		pos[COIL_DIM] = 0;

		// buffer2(..., m) = buffer1 * conj(maps(..., k, m)).
		md_zfmacc2(DIMS, dst_dims, strides_dst, buffer2, strides_sc_out, buffer1, strides_single_map, single_map);
		md_zadd(DIMS, dst_dims, dst, dst, buffer2);
	}

	md_free(single_map);
	md_free(buffer1);
	md_free(buffer2);
	md_free(tbl);
}
/* Normal multi-channel model A^H A: for each coil, weight by that coil's
 * maps, apply the single-channel normal operator, and accumulate the
 * conjugate-weighted result over coils into the map dimension.
 *
 * src, dst: (sx, sy, sz, 1, md, 1, tk) coefficient images.
 */
static void multc_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);

	// Loading single channel operator.
	const struct operator_s* nrm = data->sc_op->normal;
	const long* sc_dims = linop_domain(data->sc_op)->dims;

	long sx = sc_dims[0];
	long sy = sc_dims[1];
	long sz = sc_dims[2];
	long nc = data->nc;
	long md = data->md;

	long dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, dims, sc_dims);
	dims[MAPS_DIM] = md;

	long map_dims[] = { [0 ... DIMS - 1] = 1};
	map_dims[0] = sx;
	map_dims[1] = sy;
	map_dims[2] = sz;
	map_dims[3] = nc;
	map_dims[4] = md;

	long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_map_dims, map_dims);
	single_map_dims[COIL_DIM] = 1;

	complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
	complex float* buffer1 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src); // coil image
	complex float* buffer2 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src); // after normal operator
	complex float* buffer3 = md_alloc_sameplace(DIMS, dims, CFL_SIZE, src);    // per-coil contribution

	long pos[] = { [0 ... DIMS - 1] = 0 };

	long strides_single_map[DIMS];
	md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
	long strides_sc[DIMS];
	md_calc_strides(DIMS, strides_sc, sc_dims, CFL_SIZE);
	long strides[DIMS];
	md_calc_strides(DIMS, strides, dims, CFL_SIZE);

	md_clear(DIMS, dims, dst, CFL_SIZE);

	for (long k = 0; k < data->nc; k++) {

		md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
		md_clear(DIMS, sc_dims, buffer1, CFL_SIZE);
		md_clear(DIMS, sc_dims, buffer2, CFL_SIZE);
		md_clear(DIMS, dims, buffer3, CFL_SIZE);

		pos[COIL_DIM] = k;
		md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
		pos[COIL_DIM] = 0;

		// buffer1 = sum_m src(..., m) * maps(..., k, m).
		md_zfmac2(DIMS, dims, strides_sc, buffer1, strides, src, strides_single_map, single_map);
		operator_apply(nrm, DIMS, sc_dims, buffer2, DIMS, sc_dims, buffer1);
		// buffer3(..., m) = buffer2 * conj(maps(..., k, m)).
		md_zfmacc2(DIMS, dims, strides, buffer3, strides_sc, buffer2, strides_single_map, single_map);

		md_zadd(DIMS, dims, dst, dst, buffer3);
	}

	md_free(single_map);
	md_free(buffer1);
	md_free(buffer2);
	md_free(buffer3);
}
/* Free the multi-channel wrapper data. The sensitivity maps and the
 * wrapped single-channel operator are borrowed and not released here. */
static void multc_free(const linop_data_t* _data)
{
	const struct multc_s* d = CAST_DOWN(multc_s, _data);
	xfree(d);
}
/* Wrap the single-channel operator sc_op into the full multi-channel,
 * multi-map forward model (see multc_apply/adjoint/normal).
 *
 * nc/md: number of coils / maps; maps are borrowed (caller keeps ownership).
 * Input:  single-channel domain with MAPS_DIM extended to md.
 * Output: single-channel codomain with dim 1 extended to nc.
 *
 * Fix: md_copy_dims only reads its source, so the iovec dims are kept
 * const-qualified instead of casting the const away as before.
 */
static struct linop_s* linop_multc_create(long nc, long md, const complex float* maps, const struct linop_s* sc_op)
{
	PTR_ALLOC(struct multc_s, data);
	SET_TYPEID(multc_s, data);

	data->nc = nc;
	data->md = md;
	data->maps = maps;
	data->sc_op = sc_op;

	const long* op_inp_dims = linop_domain(sc_op)->dims;
	const long* op_out_dims = linop_codomain(sc_op)->dims;

	long input_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, input_dims, op_inp_dims);
	input_dims[MAPS_DIM] = md;

	long output_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, output_dims, op_out_dims);
	output_dims[1] = nc;

	struct linop_s* E = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), multc_apply, multc_adjoint, multc_normal, NULL, multc_free);
	return E;
}
/* Resize operator R: maps (sx, sy, sz, nc, 1, 1, tk) images to the
 * extended readout FOV (wx, sy, sz, nc, 1, 1, tk). */
static const struct linop_s* linop_wavereshape_create(long wx, long sx, long sy, long sz, long nc, long tk)
{
	long in_dims[] = { [0 ... DIMS - 1] = 1};
	in_dims[0] = sx;
	in_dims[1] = sy;
	in_dims[2] = sz;
	in_dims[3] = nc;
	in_dims[6] = tk;

	long out_dims[DIMS];
	md_copy_dims(DIMS, out_dims, in_dims);
	out_dims[0] = wx;

	return linop_resize_create(DIMS, out_dims, in_dims);
}
/* Fx operator: FFT along the (extended) readout dimension of a
 * (wx, sy, sz, nc, 1, 1, tk) array; centered variant selectable. */
static const struct linop_s* linop_fx_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1};
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	return centered ? linop_fftc_create(DIMS, dims, READ_FLAG)
			: linop_fft_create(DIMS, dims, READ_FLAG);
}
/* Wave operator W: pointwise multiplication with the wave point-spread
 * function; if the PSF carries a coefficient dimension (psf_tk > 1),
 * broadcast over COEFF_DIM as well. */
static const struct linop_s* linop_wave_create(long wx, long sy, long sz, long nc, long tk, long psf_tk, complex float* psf)
{
	long dims[] = { [0 ... DIMS - 1] = 1};
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	if (psf_tk > 1)
		return linop_cdiag_create(DIMS, dims, FFT_FLAGS | COEFF_FLAG, psf);

	return linop_cdiag_create(DIMS, dims, FFT_FLAGS, psf);
}
/* Fyz operator: FFT along both phase-encode dimensions of a
 * (wx, sy, sz, nc, 1, 1, tk) array; centered variant selectable. */
static const struct linop_s* linop_fyz_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1};
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	return centered ? linop_fftc_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG)
			: linop_fft_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG);
}
/* Construct the sampling-temporal kernel ( 1, sy, sz, 1, 1, 1, tk, tk):
 * at each (ky, kz) location, column t of the kernel is
 * phi^H diag(mask(ky, kz, :)) (phi e_t), built one basis column at a time.
 *
 * Fix: the coefficient VLA cvec was never zero-initialized, yet md_zfmac2
 * reads all tk entries in the very first iteration -- it is now cleared
 * once up front (the per-column cvec[t] = 1 / cvec[t] = 0 toggling keeps
 * it a unit vector thereafter).
 */
static void construct_kernel(
	long mask_dims[DIMS], complex float* mask,
	long phi_dims[DIMS], complex float* phi,
	long kern_dims[DIMS], complex float* kern)
{
	long sy = mask_dims[1];
	long sz = mask_dims[2];
	long tf = phi_dims[5];
	long tk = phi_dims[6];

	// Unit coefficient vector (length tk, lives in dim 6).
	long cvec_dims[] = { [0 ... DIMS - 1] = 1 };
	cvec_dims[6] = tk;
	long cvec_str[DIMS];
	md_calc_strides(DIMS, cvec_str, cvec_dims, CFL_SIZE);
	complex float cvec[tk];
	md_clear(DIMS, cvec_dims, cvec, CFL_SIZE);	// was uninitialized before first read

	// Echo-train scratch vectors (length tf, live in dim 5).
	long tvec_dims[] = { [0 ... DIMS - 1] = 1 };
	tvec_dims[5] = tf;
	long tvec_str[DIMS];
	md_calc_strides(DIMS, tvec_str, tvec_dims, CFL_SIZE);
	complex float mvec[tf];		// sampling mask over echoes at (y, z)
	complex float tvec1[tf];	// phi * e_t
	complex float tvec2[tf];	// mask .* (phi * e_t)

	long phi_str[DIMS];
	md_calc_strides(DIMS, phi_str, phi_dims, CFL_SIZE);

	// Temporary layout (tk, sy, sz, tk), permuted into kern_dims at the end.
	long out_dims[] = { [0 ... DIMS - 1] = 1 };
	out_dims[0] = tk;
	out_dims[1] = sy;
	out_dims[2] = sz;
	out_dims[3] = tk;
	complex float* out = md_calloc(DIMS, out_dims, CFL_SIZE);

	for (int y = 0; y < sy; y ++) {
		for (int z = 0; z < sz; z ++) {

			for (int t = 0; t < tf; t ++)
				mvec[t] = mask[(y + sy * z) + (sy * sz) * t];

			// Apply the masked normal operator to each basis column e_t.
			for (int t = 0; t < tk; t ++) {

				cvec[t] = 1;

				md_clear(DIMS, tvec_dims, tvec1, CFL_SIZE);
				md_zfmac2(DIMS, phi_dims, tvec_str, tvec1, cvec_str, cvec, phi_str, phi);
				md_clear(DIMS, tvec_dims, tvec2, CFL_SIZE);
				md_zfmac2(DIMS, tvec_dims, tvec_str, tvec2, tvec_str, tvec1, tvec_str, mvec);

				md_clear(DIMS, cvec_dims, out + y * tk + z * sy * tk + t * sy * sz * tk, CFL_SIZE);
				md_zfmacc2(DIMS, phi_dims, cvec_str, out + y * tk + z * sy * tk + t * sy * sz * tk,
					tvec_str, tvec2, phi_str, phi);

				cvec[t] = 0;	// restore the zero vector for the next column
			}
		}
	}

	// (tk, sy, sz, tk) -> ( 1, sy, sz, 1, 1, 1, tk, tk).
	unsigned int permute_order[DIMS] = {4, 1, 2, 5, 6, 7, 3, 0};
	for (unsigned int i = 8; i < DIMS; i++)
		permute_order[i] = i;

	md_permute(DIMS, permute_order, kern_dims, kern, out_dims, out, CFL_SIZE);
	md_free(out);
}
/* Apply fftmod phase corrections so uncentered FFTs can be used in the
 * reconstruction: the readout dimension of the table and the spatial
 * dimensions of the maps get fftmod directly, while the ky/kz modulation
 * is applied per readout line as a phase factor derived from the
 * reorder table (the table is not laid out on the (ky, kz) grid).
 *
 * Fix: the dims initializer used the range [0 ... DIMS], allocating
 * DIMS + 1 entries -- one more than every md_* call here uses and than
 * any other dims array in this file; now sized [0 ... DIMS - 1].
 */
static void fftmod_apply(long sy, long sz,
	long reorder_dims[DIMS], complex float* reorder,
	long table_dims[DIMS], complex float* table,
	long maps_dims[DIMS], complex float* maps)
{
	long wx = table_dims[0];
	long nc = table_dims[1];

	fftmod(DIMS, table_dims, READ_FLAG, table, table);
	fftmod(DIMS, maps_dims, FFT_FLAGS, maps, maps);

	// Per-sample phase ramps equivalent to fftmod along ky/kz.
	double dy = ((double) sy/2)/((double) sy);
	double dz = ((double) sz/2)/((double) sz);

	long dims[] = { [0 ... DIMS - 1] = 1};
	dims[0] = wx;
	dims[1] = nc;

	long n = reorder_dims[0];

	for (long k = 0; k < n; k++) {

		long y = lround(creal(reorder[k]));
		long z = lround(creal(reorder[k + n]));

		complex float py = cexp(2.i * M_PI * dy * y);
		complex float pz = cexp(2.i * M_PI * dz * z);

		md_zsmul(DIMS, dims, table + k * wx * nc, table + k * wx * nc, py * pz);
	}
}
int main_wshfl(int argc, char* argv[])
{
double start_time = timestamp();
struct opt_reg_s ropts;
opt_reg_init(&ropts);
int maxiter = 30;
int cgiter = 10;
int blksize = 8;
float rho = 1;
bool hgwld = false;
bool ksp = false;
const char* fwd = NULL;
const char* x0 = NULL;
int gpun = -1;
bool dcx = false;
const struct opt_s opts[] = {
{ 'R', NULL, true, opt_reg, &ropts, "<T>:A:B:C\tGeneralized regularization options. (-Rh for help)" },
OPT_INT( 'b', &blksize, "blkdim", "Block size for locally low rank."),
OPT_INT( 'i', &maxiter, "mxiter", "Maximum number of iterations."),
OPT_INT( 'j', &cgiter, "cgiter", "Maximum number of CG iterations in ADMM."),
OPT_FLOAT( 's', &rho, "admrho", "ADMM Rho value."),
OPT_STRING( 'F', &fwd, "frwrd", "Go from shfl-coeffs to data-table. Pass in coeffs path."),
OPT_STRING( 'O', &x0, "initl", "Initialize reconstruction with guess."),
OPT_INT( 'g', &gpun, "gpunm", "GPU device number."),
OPT_SET( 'K', &ksp, "Go from data-table to shuffling basis k-space."),
OPT_SET( 'H', &hgwld, "Use hogwild."),
OPT_SET( 'v', &dcx, "Split coefficients to real and imaginary components."),
};
cmdline(&argc, argv, 6, 6, usage_str, help_str, ARRAY_SIZE(opts), opts);
struct admm_conf admm = { false, false, false, rho, cgiter };
debug_printf(DP_INFO, "Loading data... ");
long maps_dims[DIMS];
complex float* maps = load_cfl(argv[1], DIMS, maps_dims);
long wave_dims[DIMS];
complex float* wave = load_cfl(argv[2], DIMS, wave_dims);
long phi_dims[DIMS];
complex float* phi = load_cfl(argv[3], DIMS, phi_dims);
long reorder_dims[DIMS];
complex float* reorder = load_cfl(argv[4], DIMS, reorder_dims);
long table_dims[DIMS];
complex float* table = load_cfl(argv[5], DIMS, table_dims);
debug_printf(DP_INFO, "Done.\n");
if (gpun >= 0)
num_init_gpu_device(gpun);
else
num_init();
int wx = wave_dims[0];
int sx = maps_dims[0];
int sy = maps_dims[1];
int sz = maps_dims[2];
int nc = maps_dims[3];
int md = maps_dims[4];
int tf = phi_dims[5];
int tk = phi_dims[6];
debug_printf(DP_INFO, "Constructing sampling mask from reorder table... ");
long mask_dims[] = { [0 ... DIMS - 1] = 1 };
mask_dims[1] = sy;
mask_dims[2] = sz;
mask_dims[5] = tf;
complex float* mask = md_calloc(DIMS, mask_dims, CFL_SIZE);
construct_mask(reorder_dims, reorder, mask_dims, mask);
debug_printf(DP_INFO, "Done.\n");
debug_printf(DP_INFO, "Constructing sampling-temporal kernel... ");
long kernel_dims[] = { [0 ... DIMS - 1] = 1 };
kernel_dims[1] = sy;
kernel_dims[2] = sz;
kernel_dims[6] = tk;
kernel_dims[7] = tk;
complex float* kernel = md_calloc(DIMS, kernel_dims, CFL_SIZE);
construct_kernel(mask_dims, mask, phi_dims, phi, kernel_dims, kernel);
md_free(mask);
debug_printf(DP_INFO, "Done.\n");
long coeff_dims[] = { [0 ... DIMS - 1] = 1 };
coeff_dims[0] = sx;
coeff_dims[1] = sy;
coeff_dims[2] = sz;
coeff_dims[4] = md;
coeff_dims[6] = tk;
coeff_dims[8] = dcx ? 2 : 1;
if (ksp == true) {
const struct linop_s* Knc = linop_kern_create(gpun >= 0, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, table_dims);
long ksp_dims[] = { [0 ... DIMS - 1] = 1 };
ksp_dims[0] = wx;
ksp_dims[1] = sy;
ksp_dims[2] = sz;
ksp_dims[3] = nc;
ksp_dims[6] = tk;
complex float* res = create_cfl(argv[6], DIMS, ksp_dims);
operator_apply(Knc->adjoint, DIMS, ksp_dims, res, DIMS, table_dims, table);
linop_free(Knc);
md_free(kernel);
unmap_cfl(DIMS, maps_dims, maps);
unmap_cfl(DIMS, wave_dims, wave);
unmap_cfl(DIMS, phi_dims, phi);
unmap_cfl(DIMS, reorder_dims, reorder);
unmap_cfl(DIMS, table_dims, table);
unmap_cfl(DIMS, ksp_dims, res);
return 0;
}
debug_printf(DP_INFO, "Creating single channel linear operators:\n");
double t1;
double t2;
t1 = timestamp();
const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
t2 = timestamp();
debug_printf(DP_INFO, "\tR: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* Fx = linop_fx_create(wx, sy, sz, 1, tk, false);
t2 = timestamp();
debug_printf(DP_INFO, "\tFx: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave_dims[COEFF_DIM], wave);
t2 = timestamp();
debug_printf(DP_INFO, "\tW: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* Fyz = linop_fyz_create(wx, sy, sz, 1, tk, false);
t2 = timestamp();
debug_printf(DP_INFO, "\tFyz: %f seconds.\n", t2 - t1);
t1 = timestamp();
long single_channel_table_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_channel_table_dims, table_dims);
single_channel_table_dims[1] = 1;
const struct linop_s* K = linop_kern_create(gpun >= 0, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
t2 = timestamp();
debug_printf(DP_INFO, "\tK: %f seconds.\n", t2 - t1);
struct linop_s* A_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF(
R, Fx), W), Fyz), K);
debug_printf(DP_INFO, "Single channel forward operator information:\n");
print_opdims(A_sc);
struct linop_s* A = linop_multc_create(nc, md, maps, A_sc);
debug_printf(DP_INFO, "Overall forward linear operator information:\n");
print_opdims(A);
if (fwd != NULL) {
debug_printf(DP_INFO, "Going from coefficients to data table... ");
complex float* coeffs_to_fwd = load_cfl(fwd, DIMS, coeff_dims);
complex float* table_forward = create_cfl(argv[6], DIMS, table_dims);
const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
const struct linop_s* CFx = linop_fx_create( wx, sy, sz, 1, tk, true);
const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave_dims[COEFF_DIM], wave);
const struct linop_s* CFyz = linop_fyz_create(wx, sy, sz, 1, tk, true);
const struct linop_s* K = linop_kern_create(gpun >= 0, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
struct linop_s* AC_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF(
R, CFx), W), CFyz), K);
struct linop_s* AC = linop_multc_create(nc, md, maps, AC_sc);
operator_apply(AC->forward, DIMS, table_dims, table_forward, DIMS, coeff_dims, coeffs_to_fwd);
debug_printf(DP_INFO, "Done.\n");
debug_printf(DP_INFO, "Cleaning up... ");
linop_free(AC);
linop_free(AC_sc);
md_free(kernel);
unmap_cfl(DIMS, maps_dims, maps);
unmap_cfl(DIMS, wave_dims, wave);
unmap_cfl(DIMS, phi_dims, phi);
unmap_cfl(DIMS, reorder_dims, reorder);
unmap_cfl(DIMS, table_dims, table);
unmap_cfl(DIMS, table_dims, table_forward);
debug_printf(DP_INFO, "Done.\n");
return 0;
}
if (dcx) {
debug_printf(DP_INFO, "\tSplitting result into real and imaginary components.\n");
struct linop_s* tmp = A;
struct linop_s* dcxop = linop_decompose_complex_create(DIMS, ITER_DIM, linop_domain(A)->dims);
A = linop_chain(dcxop, tmp);
debug_printf(DP_INFO, "New operator information:\n");
print_opdims(A);
linop_free(dcxop);
linop_free(tmp);
}
debug_printf(DP_INFO, "Normalizing data table and applying fftmod to table and maps... ");
float norm = md_znorm(DIMS, table_dims, table);
md_zsmul(DIMS, table_dims, table, table, 1. / norm);
fftmod_apply(sy, sz, reorder_dims, reorder, table_dims, table, maps_dims, maps);
debug_printf(DP_INFO, "Done.\n");
debug_printf(DP_INFO, "Preparing reconstruction operator... ");
const struct operator_p_s* thresh_ops[NUM_REGS] = { NULL };
const struct linop_s* trafos[NUM_REGS] = { NULL };
opt_reg_configure(DIMS, coeff_dims, &ropts, thresh_ops, trafos, blksize, 1, gpun >= 0);
int nr_penalties = ropts.r;
struct reg_s* regs = ropts.regs;
enum algo_t algo = ALGO_ADMM;
struct iter it = italgo_config(algo, nr_penalties, regs, maxiter, -1, hgwld, false, admm, 1, false);
debug_printf(DP_INFO, "Done.\n");
complex float* init = NULL;
if (x0 != NULL) {
debug_printf(DP_INFO, "Loading in initial guess... ");
init = load_cfl(x0, DIMS, coeff_dims);
debug_printf(DP_INFO, "Done.\n");
}
debug_printf(DP_INFO, "Reconstruction... ");
complex float* recon = create_cfl(argv[6], DIMS, coeff_dims);
struct lsqr_conf lsqr_conf = { 0., gpun >= 0 };
double recon_start = timestamp();
const struct operator_p_s* J = lsqr2_create(&lsqr_conf, it.italgo, it.iconf, (const float*) init, A, NULL, nr_penalties, thresh_ops, trafos, NULL);
operator_p_apply(J, 1., DIMS, coeff_dims, recon, DIMS, table_dims, table);
md_zsmul(DIMS, coeff_dims, recon, recon, norm);
double recon_end = timestamp();
debug_printf(DP_INFO, "Done.\nReconstruction time: %f seconds.\n", recon_end - recon_start);
debug_printf(DP_INFO, "Cleaning up and saving result... ");
operator_p_free(J);
linop_free(A);
linop_free(A_sc);
md_free(kernel);
unmap_cfl(DIMS, maps_dims, maps);
unmap_cfl(DIMS, wave_dims, wave);
unmap_cfl(DIMS, phi_dims, phi);
unmap_cfl(DIMS, reorder_dims, reorder);
unmap_cfl(DIMS, table_dims, table);
unmap_cfl(DIMS, coeff_dims, recon);
if (x0 != NULL)
unmap_cfl(DIMS, coeff_dims, init);
debug_printf(DP_INFO, "Done.\n");
double end_time = timestamp();
debug_printf(DP_INFO, "Total time: %f seconds.\n", end_time - start_time);
return 0;
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
struct OMPTraitProperty;
struct OMPTraitSelector;
struct OMPTraitSet;
class OMPTraitInfo;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
mutable IdentifierInfo *Ident_abstract;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for "vector"
/// and "bool" fast comparison. Only present if AltiVec or ZVector are
/// enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
IdentifierInfo *Ident_Bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> FloatControlHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> MSFenvAccess;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFenvAccessHandler;
std::unique_ptr<PragmaHandler> STDCFenvRoundHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// Current kind of OpenMP clause
OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown;
/// RAII class that manages the template parameter depth.
/// RAII helper that restores the template-parameter depth on scope exit.
/// Every level added through this object (operator++ / addDepth /
/// setAddedDepth) is subtracted from the tracked counter on destruction.
class TemplateParameterDepthRAII {
  unsigned &Depth;        // counter owned by the parser
  unsigned OwnedLevels;   // how many levels this RAII object contributed
public:
  explicit TemplateParameterDepthRAII(unsigned &Depth)
      : Depth(Depth), OwnedLevels(0) {}
  ~TemplateParameterDepthRAII() { Depth -= OwnedLevels; }
  /// Enter one additional template-parameter level.
  void operator++() {
    ++Depth;
    ++OwnedLevels;
  }
  /// Enter \p D additional levels at once.
  void addDepth(unsigned D) {
    Depth += D;
    OwnedLevels += D;
  }
  /// Replace this object's contribution with exactly \p D levels.
  void setAddedDepth(unsigned D) {
    Depth -= OwnedLevels;
    Depth += D;
    OwnedLevels = D;
  }
  unsigned getDepth() const { return Depth; }
  /// The depth as it was before this RAII object made any changes.
  unsigned getOriginalDepth() const { return Depth - OwnedLevels; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Free cached TemplateIdAnnotations when it is safe to do so: at
/// end-of-file, or once the preprocessor can no longer hand us pending
/// annotation tokens that might still reference them.
void MaybeDestroyTemplateIds() {
  if (TemplateIds.empty())
    return;
  bool AtSafePoint =
      Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens();
  if (AtSafePoint)
    DestroyTemplateIds();
}
void DestroyTemplateIds();
/// RAII object to destroy TemplateIdAnnotations where possible, from a
/// likely-good position during parsing.
struct DestroyTemplateIdAnnotationsRAIIObj {
  Parser &Self; // parser whose TemplateIds cache is flushed on scope exit
  DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
  // Destruction only frees the cache when MaybeDestroyTemplateIds deems it
  // safe (EOF or no pending annotation tokens).
  ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
};
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// Each suspicious '<' (one with a name-like expression on its left) is
/// recorded until a '>' or '>>' that might match it is seen, or the
/// enclosing bracket scope is left.
struct AngleBracketTracker {
  /// Flags used to rank candidate template names when there is more than one
  /// '<' in a scope.
  enum Priority : unsigned short {
    /// A non-dependent name that is a potential typo for a template name.
    PotentialTypo = 0x0,
    /// A dependent name that might instantiate to a template-name.
    DependentName = 0x2,
    /// A space appears before the '<' token.
    SpaceBeforeLess = 0x0,
    /// No space before the '<' token
    NoSpaceBeforeLess = 0x1,
    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
  };

  /// One recorded candidate '<' location.
  struct Loc {
    Expr *TemplateName;
    SourceLocation LessLoc;
    AngleBracketTracker::Priority Priority;
    unsigned short ParenCount, BracketCount, BraceCount;

    /// True when the parser sits at exactly the bracket nesting level at
    /// which this '<' was recorded.
    bool isActive(Parser &P) const {
      return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
             P.BraceCount == BraceCount;
    }

    /// True when the parser is at, or nested somewhere inside, the bracket
    /// nesting level at which this '<' was recorded.
    bool isActiveOrNested(Parser &P) const {
      if (isActive(P))
        return true;
      return P.ParenCount > ParenCount || P.BracketCount > BracketCount ||
             P.BraceCount > BraceCount;
    }
  };

  SmallVector<Loc, 8> Locs;

  /// Add an expression that might have been intended to be a template name.
  /// In the case of ambiguity, we arbitrarily select the innermost such
  /// expression, for example in 'foo < bar < baz', 'bar' is the current
  /// candidate. No attempt is made to track that 'foo' is also a candidate
  /// for the case where we see a second suspicious '>' token.
  void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
           Priority Prio) {
    if (Loc *Current = getCurrent(P)) {
      // Keep whichever candidate ranks at least as high at this level.
      if (Current->Priority <= Prio) {
        Current->TemplateName = TemplateName;
        Current->LessLoc = LessLoc;
        Current->Priority = Prio;
      }
      return;
    }
    Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount,
                    P.BracketCount, P.BraceCount});
  }

  /// Mark the current potential missing template location as having been
  /// handled (this happens if we pass a "corresponding" '>' or '>>' token
  /// or leave a bracket scope).
  void clear(Parser &P) {
    while (!Locs.empty() && Locs.back().isActiveOrNested(P))
      Locs.pop_back();
  }

  /// Get the current enclosing expression that might have been intended to
  /// be a template name, or null if none is active at this nesting level.
  Loc *getCurrent(Parser &P) {
    return (!Locs.empty() && Locs.back().isActive(P)) ? &Locs.back()
                                                      : nullptr;
  }
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
/// Language options live on the preprocessor; the parser owns none itself.
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
/// Factory used to allocate ParsedAttr objects during parsing.
AttributeFactory &getAttrFactory() { return AttrFactory; }
/// The token currently being peeked at (not yet consumed).
const Token &getCurToken() const { return Tok; }
/// The current scope is tracked by Sema, not by the parser.
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
  return Actions.incrementMSManglingNumber();
}
/// Current Objective-C declaration context (forwarded to Sema).
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
/// Convenience overload of ParseTopLevelDecl that discards the parsed
/// declaration group; returns true if the EOF was encountered.
bool ParseTopLevelDecl() {
  DeclGroupPtrTy Result;
  return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  // Remember where the consumed token was for "expected X after Y"
  // diagnostics, then advance the lexer.
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// Consume the current token only when it is of the \p Expected kind.
/// \returns true (after advancing the token stream) on a match, false
/// otherwise, leaving the stream untouched.
bool TryConsumeToken(tok::TokenKind Expected) {
  if (!Tok.is(Expected))
    return false;
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return true;
}
/// As TryConsumeToken(Expected), but on success also stores the location
/// of the consumed token in \p Loc.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  if (!TryConsumeToken(Expected))
    return false;
  Loc = PrevTokLocation;
  return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
  if (isTokenParen())
    return ConsumeParen();
  if (isTokenBracket())
    return ConsumeBracket();
  if (isTokenBrace())
    return ConsumeBrace();
  if (isTokenStringLiteral())
    return ConsumeStringToken();
  // Either swallow the completion token or treat it as an unexpected
  // completion point, depending on the caller's request.
  if (Tok.is(tok::code_completion))
    return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                    : handleUnexpectedCodeCompletionToken();
  if (Tok.isAnnotation())
    return ConsumeAnnotationToken();
  return ConsumeToken();
}
/// Get the source location just past the end of the most recently
/// consumed token.
SourceLocation getEndOfPreviousToken() {
  return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
  return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
  return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
  return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
  return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
  return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods
/// (Consume{Paren,Bracket,Brace,String,CodeCompletion,Annotation}Token)
/// rather than plain ConsumeToken.
bool isTokenSpecial() const {
  return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
         isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
  Token Next = Tok;                             // save the token we hold now
  PP.EnterToken(Consumed, /*IsReinject*/true);  // push the given token back
  PP.Lex(Tok);                                  // ...and make it current
  PP.EnterToken(Next, /*IsReinject*/true);      // saved token follows it
}
/// Consume the current annotation token and lex the next token.
/// Returns the location at which the annotation token started.
SourceLocation ConsumeAnnotationToken() {
  assert(Tok.isAnnotation() && "wrong consume method");
  SourceLocation Loc = Tok.getLocation();
  // For diagnostics, the "previous token" ends where the annotation ends,
  // not where it starts.
  PrevTokLocation = Tok.getAnnotationEndLoc();
  PP.Lex(Tok);
  return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
/// Returns the location of the consumed paren.
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.getKind() == tok::l_paren) {
    ++ParenCount;
  } else if (ParenCount) {
    // Leaving a paren scope: drop '<' candidates recorded at or inside it.
    AngleBrackets.clear(*this);
    --ParenCount; // Don't let unbalanced )'s drive the count negative.
  }
  SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Loc;
  PP.Lex(Tok);
  return Loc;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
/// Returns the location of the consumed bracket.
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.getKind() == tok::l_square) {
    ++BracketCount;
  } else if (BracketCount) {
    // Leaving a bracket scope: drop '<' candidates recorded at or inside it.
    AngleBrackets.clear(*this);
    --BracketCount; // Don't let unbalanced ]'s drive the count negative.
  }
  SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Loc;
  PP.Lex(Tok);
  return Loc;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
/// Returns the location of the consumed brace.
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.getKind() == tok::l_brace) {
    ++BraceCount;
  } else if (BraceCount) {
    // Leaving a brace scope: drop '<' candidates recorded at or inside it.
    AngleBrackets.clear(*this);
    --BraceCount; // Don't let unbalanced }'s drive the count negative.
  }
  SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Loc;
  PP.Lex(Tok);
  return Loc;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
/// Returns the location of the consumed string token.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
/// Returns the location of the consumed completion token.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
  // Record that completion was reached so results can be produced.
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ROUND...
void HandlePragmaFEnvRound();
/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  // N == 0 is the parser's own buffered token; past EOF nothing further
  // can be peeked at.
  if (N == 0 || Tok.is(tok::eof)) return Tok;
  return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
  return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
/// A null annotation value (stored by setTypeAnnotation for an invalid
/// result) maps to TypeError().
static TypeResult getTypeAnnotation(const Token &Tok) {
  if (!Tok.getAnnotationValue())
    return TypeError();
  return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
/// Store a parsed type in an annotation token; an invalid result is stored
/// as a null annotation value, which getTypeAnnotation maps to TypeError().
static void setTypeAnnotation(Token &Tok, TypeResult T) {
  assert((T.isInvalid() || T.get()) &&
         "produced a valid-but-null type annotation?");
  Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr());
}
/// Read a non-type name (a NamedDecl) out of an annotation token.
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
  return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
  Tok.setAnnotationValue(ND);
}
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
  return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
  Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
  return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
  Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// TryAnnotateTypeOrScopeToken will try hard to find a type name, attempting
// typo correction if necessary.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
/// Quick syntactic check for tokens that could begin a C++ scope
/// specifier: an identifier, '::', decltype, __super, or a template-id
/// annotation immediately followed by '::'.
bool MightBeCXXScopeToken() {
  if (Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
      Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super))
    return true;
  return Tok.is(tok::annot_template_id) &&
         NextToken().is(tok::coloncolon);
}
/// Annotate only when the current token might begin a C++ scope specifier;
/// otherwise returns false without calling TryAnnotateCXXScopeToken.
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
  return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}
private:
/// Outcome of an attempt to annotate a name token (see TryAnnotateName).
enum AnnotatedNameKind {
  /// Annotation has failed and emitted an error.
  ANK_Error,
  /// The identifier is a tentatively-declared name.
  ANK_TentativeDecl,
  /// The identifier is a template name. FIXME: Add an annotation for that.
  ANK_TemplateName,
  /// The identifier can't be resolved.
  ANK_Unresolved,
  /// Annotation was successful.
  ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  // Fast path: nothing to do unless an AltiVec-style extension is enabled.
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  // 'pixel' is only context-sensitive under AltiVec proper, not ZVector.
  IdentifierInfo *II = Tok.getIdentifierInfo();
  bool IsCandidate = II == Ident_vector || II == Ident_bool ||
                     II == Ident_Bool ||
                     (getLangOpts().AltiVec && II == Ident_pixel);
  if (!IsCandidate)
    return false;
  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  const auto &LO = getLangOpts();
  // Nothing to do outside AltiVec / ZVector modes.
  if (!LO.AltiVec && !LO.ZVector)
    return false;
  // Only the identifier 'vector' is context-sensitive here.
  if (Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);
  // Annotation tokens carry no identifier, so they can never match.
  if (Tok.isAnnotation())
    return false;
  // Lazily look up and cache the 'instancetype' identifier on first use.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  // Parser state captured at construction time so Revert() can restore it.
  PreferredTypeBuilder PrevPreferredType;
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive; // True until Commit() or Revert() is called.

public:
  explicit TentativeParsingAction(Parser &p)
      : P(p), PrevPreferredType(P.PreferredType) {
    // Snapshot the current token, tentative-identifier count and
    // delimiter-nesting counters, then ask the preprocessor to start caching
    // tokens so we can rewind to this exact position later.
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    // Keep the consumed tokens, but drop identifiers that were only
    // tentatively declared during this action.
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    // Rewind the token stream and restore every piece of parser state
    // captured in the constructor.
    P.PP.Backtrack();
    P.PreferredType = PrevPreferredType;
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }
  ~TentativeParsingAction() {
    // Exactly one of Commit()/Revert() must run before destruction.
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  // Unconditionally rewind on destruction; this action is never committed.
  ~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC; // The ObjC container exited in the ctor; null if none was active.
  // Tracks whether we're inside an ObjC container; restored on destruction.
  SaveAndRestore<bool> WithinObjCContainer;

public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    // Only switch contexts when we are actually inside an ObjC container.
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    // Re-enter the same container we stepped out of, if any.
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,              // Extra ';' at namespace/file scope.
  InsideStruct = 1,                 // Extra ';' among struct/class members.
  InstanceVariableList = 2,         // Extra ';' in an ObjC ivar list.
  AfterMemberFunctionDefinition = 3 // Extra ';' after a member function body.
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
// Used only as the Op argument of checkCompoundToken() below.
enum class CompoundToken {
  /// A '(' '{' beginning a statement-expression.
  StmtExprBegin,
  /// A '}' ')' ending a statement-expression.
  StmtExprEnd,
  /// A '[' '[' beginning a C++11 or C2x attribute.
  AttrBegin,
  /// A ']' ']' ending a C++11 or C2x attribute.
  AttrEnd,
  /// A '::' '*' forming a C++ pointer-to-member declaration.
  MemberPtr,
};
/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
tok::TokenKind FirstTokKind, CompoundToken Op);
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self; // Null once the scope has been exited (or was never entered).
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // ParseScope - Construct a new object to manage a scope in the
  // parser Self where the new Scope is created with the flags
  // ScopeFlags, but only when we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt)
      Self->EnterScope(ScopeFlags);
    else {
      // No scope is entered here (presumably the upcoming compound
      // statement opens its own); only bump the MS mangling number.
      // Clearing Self makes Exit()/the destructor no-ops.
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      this->Self = nullptr;
    }
  }
  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (Self) {
      Self->ExitScope();
      Self = nullptr;
    }
  }
  ~ParseScope() {
    Exit();
  }
};
/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
class MultiParseScope {
  Parser &Self;
  unsigned NumScopes = 0; // Scopes entered through this object, not yet exited.
  MultiParseScope(const MultiParseScope&) = delete;

public:
  MultiParseScope(Parser &Self) : Self(Self) {}
  /// Enter one additional scope with the given flags.
  void Enter(unsigned ScopeFlags) {
    Self.EnterScope(ScopeFlags);
    ++NumScopes;
  }
  /// Exit every scope entered so far, innermost first.
  void Exit() {
    while (NumScopes) {
      Self.ExitScope();
      --NumScopes;
    }
  }
  ~MultiParseScope() {
    Exit();
  }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;   // Scope being managed; presumably null when unmanaged.
  unsigned OldFlags; // Previous flags, restored by the destructor.
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  // Both ctor and dtor are defined out of line; if ManageFlags is false the
  // object presumably does nothing — confirm against the implementation.
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Convenience overload: emit the diagnostic at the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
  return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
// Values are bit-flags; combine them with the friend operator| below.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
  /// Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
// Allow SkipUntilFlags values to be OR-combined while preserving the enum
// type (a plain enum does not define operator| on itself).
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  // Forward to the ArrayRef overload with a one-element token list,
  // mirroring the two- and three-token overloads below.
  tok::TokenKind TokArray[] = {T};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  // Forward to the ArrayRef overload; the initializer list's backing array
  // outlives the call expression.
  return SkipUntil({T1, T2}, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  // Forward to the ArrayRef overload; the initializer list's backing array
  // outlives the call expression.
  return SkipUntil({T1, T2, T3}, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a missleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  // One hook per category of late-parsed tokens. All are defined out of
  // line; subclasses override only the categories they carry (the base
  // versions are presumably no-ops — confirm in the .cpp).
  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
  virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  // Each override presumably forwards to the corresponding hook of every
  // late-parsed declaration inside Class (definitions are out of line).
  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;
  void ParseLexedPragmas() override;

private:
  Parser *Self;
  ParsingClass *Class; // The nested class whose members are handled.
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;        // Lexed tokens of the attribute's arguments.
  IdentifierInfo &AttrName; // The attribute's name.
  // NOTE(review): presumably the macro this attribute was expanded from,
  // when applicable — confirm at the assignment site.
  IdentifierInfo *MacroII = nullptr;
  SourceLocation AttrNameLoc;
  SmallVector<Decl*, 2> Decls; // Declarations the attribute applies to.

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  /// Record one more declaration this attribute appertains to.
  void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// member declarations.
class LateParsedPragma : public LateParsedDeclaration {
  Parser *Self = nullptr;
  AccessSpecifier AS = AS_none; // Access specifier in effect at the pragma.
  CachedTokens Toks;            // Lexed tokens of the pragma.

public:
  explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
      : Self(P), AS(AS) {}

  /// Take the cached tokens by swapping; \p Cached ends up with this
  /// object's previous (empty) token list.
  void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
  const CachedTokens &toks() const { return Toks; }
  AccessSpecifier getAccessSpecifier() const { return AS; }

  void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  /// \p PSoon indicates the attributes are meant to be parsed shortly after
  /// creation rather than at the end of the enclosing class.
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  /// Whether these attributes will be parsed shortly after creation.
  /// Marked const: it is a pure accessor (fixes const-correctness so the
  /// list can be queried through a const reference).
  bool parseSoon() const { return ParseSoon; }

private:
  bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;           // The method whose definition was lexed, not yet parsed.
  CachedTokens Toks; // The lexed tokens of the definition.

  explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

  void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  // Toks may be null: parameters without a default argument are stored too
  // (see LateParsedMethodDeclaration::DefaultArgs).
  explicit LateParsedDefaultArgument(Decl *P,
                                     std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser *Self;

  /// Method - The method declaration.
  Decl *Method;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  /// NOTE(review): raw pointer — ownership/deletion is handled out of line;
  /// confirm before changing its lifetime.
  CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must to be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
        TagOrTemplate(TagOrTemplate) {}

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Return the innermost class currently being parsed; only valid while
/// ClassStack is non-empty (asserted).
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;
  bool Popped; // True once Pop() has run; guards double-popping.
  Sema::ParsingClassState State;

public:
  // Pushes the class in the constructor; the destructor pops it unless
  // Pop() was called explicitly first.
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class off the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    if (!Popped)
      P.PopParsingClass(State);
  }
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  /// Build the default non-template state.
  /// Fix: also initialize LastParameterListWasEmpty, which was previously
  /// left indeterminate by this constructor (reading it was UB).
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc(),
        LastParameterListWasEmpty(false) { }

  /// Build the state for a template declaration or, when
  /// \p isSpecialization is true, an explicit specialization.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  /// Build the state for an explicit instantiation, optionally preceded
  /// by 'extern' (ExternLoc invalid when absent).
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};
// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
// What kind of initializer a run of cached tokens represents.
enum CachedInitKind {
  CIK_DefaultArgument,   // A default function argument.
  CIK_DefaultInitializer // A default member initializer.
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// Single-token convenience form: forwards to the two-token overload below
/// with the same token passed twice.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl; // The @implementation declaration being parsed.
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  /// Registers this object as the parser's active ObjC implementation
  /// context. Fix: Finished is now initialized in the member-init list,
  /// consistent with the other members, instead of assigned in the body.
  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  // Set false here; presumably flipped by finish() — defined out of line.
  bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
// Indexes into the ObjCTypeQuals cache declared right below.
enum ObjCTypeQual {
  objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
  objc_nonnull, objc_nullable, objc_null_unspecified,
  objc_NumQuals // Number of qualifiers; must remain last.
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
  NotTypeCast = 0, // The expression is not a type cast.
  MaybeTypeCast,   // The expression may turn out to be a type cast.
  IsTypeCast       // The expression is known to be a type cast.
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
  AnyCastExpr = 0, // Parse any cast-expression.
  UnaryExprOnly,   // Restrict parsing to unary-expressions.
  PrimaryExprOnly  // Restrict parsing to primary-expressions.
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  // Only relevant while a potential template-argument list is open.
  auto *Info = AngleBrackets.getCurrent(*this);
  if (!Info)
    return false;
  return checkPotentialAngleBracketDelimiter(*Info, OpToken);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseSYCLUniqueStableNameExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
/// Parse a parenthesized construct; ExprType is taken by reference so it
/// can be narrowed to the alternative that was actually parsed.
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
/// Returns true if tokens A and B are adjacent in the source.
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
/// Parse an optional nested-name-specifier into SS; the optional
/// out-parameters (MayBePseudoDestructor, LastII) report extra information
/// callers may request.
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHasErrors,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer. Reported through the
/// Tentative out-parameter of ParseLambdaIntroducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
/// Parse a lambda-introducer into Intro. If Tentative is non-null, the
/// parse is tentative and its outcome is reported there (see
/// LambdaIntroducerTentativeParse).
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
/// Parse an exception-specification. When Delayed is true the tokens may
/// instead be cached in ExceptionSpecTokens for later parsing.
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
/// Parse a condition (and optional init-statement) for an if/switch/
/// while/for; FRI is supplied when a for-range-declaration is possible.
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr,
bool EnterForConditionScope = false);
DeclGroupPtrTy
ParseAliasDeclarationInInitStatement(DeclaratorContext Context,
ParsedAttributesWithRange &Attrs);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// Parse a C99 6.7.8 initializer: either a brace-enclosed initializer list
/// or a plain assignment-expression.
///
/// initializer: [C99 6.7.8]
///   assignment-expression
///   '{' ...
ExprResult ParseInitializer() {
  return Tok.is(tok::l_brace) ? ParseBraceInitializer()
                              : ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
/// Bundled arguments for ParseInitializerWithPotentialDesignator: the
/// initializer expressions collected so far plus a preferred base type
/// (presumably used for designator code-completion — confirm at use site).
struct DesignatorCompletionInfo {
SmallVectorImpl<Expr *> &InitExprs;
QualType PreferredBaseType;
};
ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo);
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
/// Statement parsing entry points. Several take an optional
/// TrailingElseLoc out-parameter (presumably for dangling-else handling —
/// confirm at the definitions) and the ParsedStmtContext in which the
/// statement appears.
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
/// Parse the parenthesized condition (and optional init-statement) of an
/// if/switch, filling InitStmt and CondResult.
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc, Sema::ConditionKind CK,
SourceLocation *LParenLoc = nullptr,
SourceLocation *RParenLoc = nullptr);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior with which this __if_exists or __if_not_exists block
/// should be parsed (see IfExistsBehavior).
IfExistsBehavior Behavior;
};
/// Parse the condition of a Microsoft __if_exists/__if_not_exists
/// construct into Result.
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
/// Parse the optional operand lists of a GCC-style asm statement.
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext Context) {
  // Enumerate every context (no default) so -Wswitch flags new enumerators.
  switch (Context) {
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_template_type_arg:
    return true;
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Whether a defining-type-specifier is permitted in a given context.
/// (Returned by isDefiningTypeSpecifierContext.)
enum class AllowDefiningTypeSpec {
/// The grammar doesn't allow a defining-type-specifier here, and we must
/// not parse one (eg, because a '{' could mean something else).
No,
/// The grammar doesn't allow a defining-type-specifier here, but we permit
/// one for error recovery purposes. Sema will reject.
NoButErrorRecovery,
/// The grammar allows a defining-type-specifier here, even though it's
/// always invalid. Sema will reject.
YesButInvalid,
/// The grammar allows a defining-type-specifier here, and one can be valid.
Yes
};
/// Is this a context in which we are parsing defining-type-specifiers (and
/// so permit class and enum definitions in addition to non-defining class and
/// enum elaborated-type-specifiers)?
static AllowDefiningTypeSpec
isDefiningTypeSpecifierContext(DeclSpecContext Context) {
  // Cases ordered from most to least restrictive; exhaustive so -Wswitch
  // catches new enumerators.
  switch (Context) {
  case DeclSpecContext::DSC_trailing:
    return AllowDefiningTypeSpec::No;
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
    return AllowDefiningTypeSpec::NoButErrorRecovery;
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
    return AllowDefiningTypeSpec::YesButInvalid;
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
    return AllowDefiningTypeSpec::Yes;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which an opaque-enum-declaration can appear?
static bool isOpaqueEnumDeclarationContext(DeclSpecContext Context) {
  // Exhaustive switch (no default) keeps -Wswitch coverage.
  switch (Context) {
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
    return false;
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext Context) {
  // Exhaustive switch (no default) keeps -Wswitch coverage.
  switch (Context) {
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  /// Location of the ':' separating declaration and range; invalid if none
  /// was seen.
  SourceLocation ColonLoc;
  ExprResult RangeExpr;
  /// True when a ':' was seen, i.e. this really is a for-range-declaration.
  bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
/// ForRangeInit extended with the statement for the loop variable.
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
/// Map a DeclaratorContext to the DeclSpecContext used when parsing its
/// declaration specifiers.
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
RecordDecl *TagDecl);
/// Parse a struct/union member declaration; FieldsCallback is presumably
/// invoked per parsed field declarator — confirm at the definition.
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true only when we are certain we are looking at a decl-specifier
/// and not, e.g., part of a function-style cast; false when it is not a
/// decl-specifier or when we are unsure.
bool isKnownToBeDeclarationSpecifier() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXDeclarationSpecifier() == TPResult::True;
}
/// Disambiguate between a declaration and an expression statement while
/// parsing function bodies: true for declaration, false for expression.
bool isDeclarationStatement() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXDeclarationStatement();
}
/// Disambiguate between a declaration and an expression in the C 'clause-1'
/// or C++ 'for-init-statement' part of a 'for' statement: true for
/// declaration, false for expression.
bool isForInitDeclaration() {
  // OpenMP needs to be told a loop is starting before we disambiguate.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  if (Tok.is(tok::kw_using))
    return true;
  return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur. Passed to isCXXTypeId.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// Assumes that a '(' was parsed and decides whether the parens contain an
/// expression or a type-id: true for a type-id, false for an expression.
/// isAmbiguous reports whether the answer could be either.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    // No type-id/expression ambiguity exists in C.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool Ambiguous;
  return isTypeIdInParens(Ambiguous);
}
/// Checks if the current tokens form a type-id or an expression.
/// Similar to isTypeIdInParens, but does not assume the type-id is
/// parenthesized.
bool isTypeIdUnambiguously() {
  if (!getLangOpts().CPlusPlus)
    return isTypeSpecifierQualifier();
  bool Ambiguous; // Result discarded in this unambiguous context.
  return isCXXTypeId(TypeIdUnambiguous, Ambiguous);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
/// Possible outcomes of isCXXConditionDeclarationOrInitStatement.
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
/// Disambiguate a type-id from an expression in the given context; see
/// TentativeCXXTypeIdContext.
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Convenience overload of isCXXTypeId that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool Ambiguous;
  return isCXXTypeId(Context, Ambiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
public:
/// Parse a type-name; the public entry point for clients outside the
/// parser.
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context = DeclaratorContext::TypeName,
AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
/// Check for the start of an attribute-specifier-seq ('[[') in a context
/// where an attribute is not allowed; diagnoses if one is found.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
/// Emit the diagnostic for a C++11 attribute found in a prohibited
/// position (see CheckProhibitedCXX11Attribute above).
bool DiagnoseProhibitedCXX11Attribute();
/// If the current tokens begin a C++11 attribute-specifier ('[[' or
/// 'alignas') in a misplaced position, diagnose it, pointing at
/// CorrectLocation as the place it should have appeared.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  bool AtAttributeStart =
      (Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
      Tok.is(tok::kw_alignas);
  if (AtAttributeStart)
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
/// Emit the diagnostic for a misplaced C++11 attribute, suggesting
/// CorrectLocation instead (see CheckMisplacedCXX11Attribute).
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
/// Strip type attributes off the DeclSpec; exact behavior depends on the
/// tag use kind TUK — see the definition.
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
/// Diagnose and drop any attributes present in Attrs; FixItLoc, when
/// provided, is a possible correct location for the attributes.
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }
}
/// View-based variant of ProhibitAttributes: diagnoses and clears only the
/// attribute list (clearListOnly) rather than the owning collection.
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
}
/// Emit the diagnostic for attributes in a prohibited position (see the
/// ProhibitAttributes helpers above).
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic
// locations which the standard permits but we don't support yet; for
// example, attributes appertaining to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID,
bool DiagnoseEmptyAttrs = false);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Emit warnings for C++11 and C2x attributes that are in a position that
/// clang accepts as an extension.
void DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs);
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// Bitmask selecting which attribute syntaxes ParseAttributes accepts.
enum ParseAttrKindMask {
PAKM_GNU = 1 << 0,
PAKM_Declspec = 1 << 1,
PAKM_CXX11 = 1 << 2,
};
/// \brief Parse attributes based on what syntaxes are desired, allowing for
/// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
/// __attribute__((...)) __declspec(...) __attribute__((...)))
/// Note that Microsoft attributes (spelled with single square brackets) are
/// not supported by this because of parsing ambiguities with other
/// constructs.
///
/// There are some attribute parse orderings that should not be allowed in
/// arbitrary order. e.g.,
///
/// [[]] __attribute__(()) int i; // OK
/// __attribute__(()) [[]] int i; // Not OK
///
/// Such situations should use the specific attribute parsing functionality.
void ParseAttributes(unsigned WhichAttrKinds,
ParsedAttributesWithRange &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr);
void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
ParsedAttributesWithRange AttrsWithRange(AttrFactory);
ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs);
Attrs.takeAllFrom(AttrsWithRange);
}
/// \brief Possibly parse attributes based on what syntaxes are desired,
/// allowing for the order to vary.
bool MaybeParseAttributes(unsigned WhichAttrKinds,
ParsedAttributesWithRange &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
(standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
return true;
}
return false;
}
bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
(standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
return true;
}
return false;
}
/// If the current token starts a GNU __attribute__ list, parse it and
/// attach the resulting attributes to the declarator \p D.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (!Tok.is(tok::kw___attribute))
    return;
  ParsedAttributes ParsedAttrs(AttrFactory);
  SourceLocation AttrsEndLoc;
  ParseGNUAttributes(ParsedAttrs, &AttrsEndLoc, LateAttrs, &D);
  D.takeAttributes(ParsedAttrs, AttrsEndLoc);
}
/// Parses GNU-style attributes and returns them without source range
/// information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
///
/// \returns true if at least one __attribute__ list was parsed.
bool MaybeParseGNUAttributes(ParsedAttributes &Attrs,
                             SourceLocation *EndLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute)) {
    // Parse into the ranged list, then strip the range information by
    // moving the attributes into the caller's plain list. (Previously the
    // code parsed directly into Attrs, leaving AttrsWithRange dead and the
    // takeAllFrom() call a no-op.)
    ParsedAttributesWithRange AttrsWithRange(AttrFactory);
    ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs);
    Attrs.takeAllFrom(AttrsWithRange);
    return true;
  }
  return false;
}
/// Parse a GNU __attribute__ list into \p Attrs if one starts here.
/// \returns true if attributes were parsed.
bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs,
                             SourceLocation *EndLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (!Tok.is(tok::kw___attribute))
    return false;
  ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
  return true;
}
/// Parses GNU-style attributes and returns them without source range
/// information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
void ParseGNUAttributes(ParsedAttributes &Attrs,
                        SourceLocation *EndLoc = nullptr,
                        LateParsedAttrList *LateAttrs = nullptr,
                        Declarator *D = nullptr) {
  // Parse with range tracking, then drop the range by moving the attributes
  // into the caller's rangeless list.
  ParsedAttributesWithRange RangedAttrs(AttrFactory);
  ParseGNUAttributes(RangedAttrs, EndLoc, LateAttrs, D);
  Attrs.takeAllFrom(RangedAttrs);
}
void ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// Re-inject tokens for any OpenMP directives that were cached while
/// parsing attributes, so the parser consumes them next.
void ReplayOpenMPAttributeTokens(CachedTokens &OpenMPTokens) {
// If parsing the attributes found an OpenMP directive, emit those tokens
// to the parse stream now.
if (!OpenMPTokens.empty()) {
// Push the current token back first so it is seen again after the
// replayed OpenMP token stream has been consumed.
PP.EnterToken(Tok, /*IsReinject*/ true);
PP.EnterTokenStream(OpenMPTokens, /*DisableMacroExpansion*/ true,
/*IsReinject*/ true);
ConsumeAnyToken(/*ConsumeCodeCompletionTok*/ true);
}
}
/// If a C++11 attribute-specifier-seq starts here (and standard attributes
/// are allowed), parse it and attach the attributes to the declarator \p D.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!(standardAttributesAllowed() && isCXX11AttributeSpecifier()))
    return;
  ParsedAttributesWithRange ParsedAttrs(AttrFactory);
  SourceLocation AttrsEndLoc;
  ParseCXX11Attributes(ParsedAttrs, &AttrsEndLoc);
  D.takeAttributes(ParsedAttrs, AttrsEndLoc);
}
/// Parse a C++11 attribute-specifier-seq into \p attrs (dropping range
/// information) if one starts here. \returns true if attributes were parsed.
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!(standardAttributesAllowed() && isCXX11AttributeSpecifier()))
    return false;
  ParsedAttributesWithRange RangedAttrs(AttrFactory);
  ParseCXX11Attributes(RangedAttrs, endLoc);
  attrs.takeAllFrom(RangedAttrs);
  return true;
}
/// Parse a C++11 attribute-specifier-seq into \p attrs if one starts here.
/// \param OuterMightBeMessageSend forwarded to the disambiguation check to
/// distinguish '[[' from a nested Objective-C message send.
bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  const bool IsAttributeSpecifier =
      standardAttributesAllowed() &&
      isCXX11AttributeSpecifier(false, OuterMightBeMessageSend);
  if (!IsAttributeSpecifier)
    return false;
  ParseCXX11Attributes(attrs, endLoc);
  return true;
}
void ParseOpenMPAttributeArgs(IdentifierInfo *AttrName,
CachedTokens &OpenMPTokens);
void ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
CachedTokens &OpenMPTokens,
SourceLocation *EndLoc = nullptr);
/// Parse one C++11 attribute-specifier, then immediately replay any OpenMP
/// directive tokens that were cached while parsing it.
void ParseCXX11AttributeSpecifier(ParsedAttributes &Attrs,
SourceLocation *EndLoc = nullptr) {
CachedTokens OpenMPTokens;
ParseCXX11AttributeSpecifierInternal(Attrs, OpenMPTokens, EndLoc);
ReplayOpenMPAttributeTokens(OpenMPTokens);
}
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
CachedTokens &OpenMPTokens);
IdentifierInfo *TryParseCXX11AttributeIdentifier(
SourceLocation &Loc,
Sema::AttributeCompletion Completion = Sema::AttributeCompletion::None,
const IdentifierInfo *EnclosingScope = nullptr);
/// Parse Microsoft-style [attributes] if Microsoft extensions are enabled
/// and the next token is '['.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().MicrosoftExt)
    return;
  if (Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// Parse __declspec(...) attributes if the keyword is enabled and present.
/// \returns true if a __declspec list was parsed.
bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (!getLangOpts().DeclSpecKeyword || !Tok.is(tok::kw___declspec))
    return false;
  ParseMicrosoftDeclSpecs(Attrs, End);
  return true;
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();
void ParsePtrauthQualifier(ParsedAttributes &Attrs);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Return the virt-specifier kind of the current token (convenience
/// overload forwarding to the Token-taking version).
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
bool isClassCompatibleKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
// Set only when Sema accepted the declarator scope; guards the matching
// ActOnCXXExitDeclaratorScope call in the destructor.
bool EnteredScope;
// Set as soon as a parser Scope is pushed, even if the Sema entry fails,
// so the destructor always pops the Scope it created.
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
// NOTE(review): only flag the Sema scope as entered when the callback
// returns false -- presumably the usual Sema "true means error"
// convention; confirm against Sema::ActOnCXXEnterDeclaratorScope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
/// Bitmask describing which attribute syntaxes a type-qualifier list may
/// contain at the current parse position.
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1, ///< Accept __attribute__((...)).
AR_CXX11AttributesParsed = 1 << 2, ///< Accept C++11 [[...]] attributes.
AR_DeclspecAttributesParsed = 1 << 3, ///< Accept __declspec(...).
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
/// Locations and identifier for one component of a (possibly nested)
/// namespace definition.
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc; // location of the 'namespace' keyword
SourceLocation InlineLoc;    // location of 'inline', if present
SourceLocation IdentLoc;     // location of the namespace name
IdentifierInfo *Ident;       // the namespace name itself
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// Holds the pieces of a single using-declarator while it is being parsed.
struct UsingDeclarator {
SourceLocation TypenameLoc; // location of 'typename', if present
CXXScopeSpec SS;            // nested-name-specifier preceding the name
UnqualifiedId Name;         // the declared name itself
SourceLocation EllipsisLoc; // location of '...', if a pack expansion
/// Reset all members so this object can be reused for the next declarator.
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &Attrs,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHadErrors,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
OMPTraitInfo *ParentTI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse 'omp [begin] assume[s]' directive.
void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parse 'omp end assumes' directive.
void ParseOpenMPEndAssumesDirective(SourceLocation Loc);
/// Parse clauses for '#pragma omp [begin] declare target'.
void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI);
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind,
OpenMPDirectiveKind EndDKind,
SourceLocation Loc);
/// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if
/// it is not the current token.
void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);
/// Check the \p FoundKind against the \p ExpectedKind, if not issue an error
/// that the "end" matching the "begin" directive of kind \p BeginKind was not
/// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd
/// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`.
void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
OpenMPDirectiveKind ExpectedKind,
OpenMPDirectiveKind FoundKind,
SourceLocation MatchingLoc,
SourceLocation FoundLoc,
bool SkipUntilOpenMPEnd);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Tries to parse cast part of OpenMP array shaping operation:
/// '[' expression ']' { '[' expression ']' } ')'.
bool tryParseOpenMPArrayShapingCastPart();
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param DKind Directive kind.
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses the 'sizes' clause of a '#pragma omp tile' directive.
OMPClause *ParseOpenMPSizesClause();
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
/// Parses and creates OpenMP 5.0 iterators expression:
/// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
/// <range-specification> }+ ')'
ExprResult ParseOpenMPIteratorsExpr();
/// Parses allocators and traits in the context of the uses_allocator clause.
/// Expected format:
/// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
/// Parses clause with an interop variable of kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
//
OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *DepModOrTailExpr = nullptr;
SourceLocation ColonLoc; // location of ':' in the clause, if any
SourceLocation RLoc;     // location of the closing ')'
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
///< lastprivate clause.
SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
MapTypeModifiers;
// Locations parallel to MapTypeModifiers, one per modifier.
SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
MapTypeModifiersLoc;
SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
MotionModifiers;
// Locations parallel to MotionModifiers, one per modifier.
SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc;
bool IsMapTypeImplicit = false;
SourceLocation ExtraModifierLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName, bool AllowConstructorName,
bool AllowDeductionGuide,
SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
/// Parse the given string as a type.
///
/// This is a dangerous utility function currently employed only by API notes.
/// It is not a general entry-point for safely parsing types from strings.
///
/// \param typeStr The string to be parsed as a type.
/// \param context The name of the context in which this string is being
/// parsed, which will be used in diagnostics.
/// \param includeLoc The location at which this parse was triggered.
TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
SourceLocation includeLoc);
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
ExprResult ParseBuiltinPtrauthTypeDiscriminator();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
/// Accumulates the qualifiers that may follow the GNU 'asm' keyword
/// ('volatile', 'inline', 'goto'), stored as a bitmask.
class GNUAsmQualifiers {
public:
  enum AQ {
    AQ_unspecified = 0,
    AQ_volatile = 1,
    AQ_inline = 2,
    AQ_goto = 4,
  };
  /// Spelling of a single qualifier, for diagnostics.
  static const char *getQualifierName(AQ Qualifier);
  /// Record one qualifier; returns the duplicate/failure status.
  bool setAsmQualifier(AQ Qualifier);
  bool isVolatile() const { return Qualifiers & AQ_volatile; }
  bool isInline() const { return Qualifiers & AQ_inline; }
  bool isGoto() const { return Qualifiers & AQ_goto; }

private:
  unsigned Qualifiers = AQ_unspecified; // bitwise OR of AQ values
};
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
#endif
|
bench.c | #include "omp.h"
#include "pmsis.h"
#define LOOP_ITER (2048)
#define NB_ITER (256)
#define NB_BARRIER_ITER (256)
#define NB_ITER_SINGLE (128)
#define CORE_ID pi_core_id()
#define PRINTF(...)
//#define PRINTF(...) printf(__VA_ARGS__)
/* Reset the cluster perf counter, configure it to count cycles, and
start it. */
static void start_timer()
{
pi_perf_cl_reset();
pi_perf_conf(1<<PI_PERF_CYCLES);
pi_perf_cl_start();
}
/* Zero the cluster performance counter without starting it. */
static void reset_timer()
{
pi_perf_cl_reset();
}
/* Read the current cycle count from the cluster performance counter. */
static unsigned int get_time()
{
return pi_perf_cl_read(PI_PERF_CYCLES);
}
/* Reset and start the cycle counter. Always returns 0: the counter itself is
 * zeroed here, so getTimer()'s absolute read is already the elapsed time. */
static inline unsigned int startTimer() {
PRINTF("Starting timer\n");
reset_timer();
start_timer();
return 0;
}
/* Return elapsed cycles since startTimer(). The `start` parameter is ignored
 * by design: the counter was reset at start, so the raw read is the delta. */
static inline unsigned int getTimer(unsigned int start)
{
PRINTF("Ending timer\n");
return get_time();
}
/*
 * Measure the average cost, in cycles, of an OpenMP barrier when executed
 * by `nthreads` threads. Thread 0 owns the timer and prints the result;
 * the other threads only participate in the barriers.
 */
void test_barrier(unsigned int nthreads)
{
#pragma omp parallel num_threads(nthreads) shared(nthreads)
    {
        unsigned int t0 = 0;
        float avg_cost = 0;
        const int is_master = (omp_get_thread_num() == 0);

        if (is_master)
            t0 = startTimer();

        for (int it = 0; it < NB_BARRIER_ITER; it++)
        {
#pragma omp barrier
        }

        if (is_master) {
            unsigned int elapsed = getTimer(t0);
            avg_cost = (float) elapsed / NB_BARRIER_ITER;
            printf("BARRIER %d threads: %f cycles\n", nthreads, avg_cost);
        }
    }
}
/*
 * Measure the average cost, in cycles, of entering and leaving an OpenMP
 * critical section with `nthreads` threads contending. Core 0 prints the
 * per-entry cost after all threads have finished their iterations.
 */
void test_critical(unsigned int nthreads)
{
#pragma omp parallel num_threads(nthreads)
    {
        unsigned int t0 = startTimer();
        float avg_cost = 0;

        for (int it = 0; it < NB_ITER; it++)
        {
#pragma omp critical
            {
                /* volatile keeps the body from being optimized away */
                volatile int a = 0;
            }
        }

#pragma omp barrier
        avg_cost = (float) getTimer(t0) / NB_ITER;
        if (CORE_ID == 0) {
            printf("CRITICAL %d threads: %.3f cycles\n", nthreads, avg_cost);
        }
    }
}
/*
 * Measure the per-iteration cost of a statically scheduled `parallel for`
 * over LOOP_ITER iterations, averaged over NB_ITER repetitions.
 */
void test_parallel_loop_static(unsigned int nthreads)
{
    unsigned int t0 = startTimer();
    float per_iter = 0;

    for (int rep = 0; rep < NB_ITER; rep++)
    {
#pragma omp parallel for num_threads(nthreads)
        for (int idx = 0; idx < LOOP_ITER; idx++)
        {
            /* volatile sink keeps the loop body from being optimized away */
            volatile int sink = idx;
        }
    }

    per_iter = ((float) getTimer(t0)/(NB_ITER * LOOP_ITER));
    printf("PARALLEL FOR %d threads STATIC %d iter: %.3f cycle(s) per iteration\n", nthreads, LOOP_ITER, per_iter);
}
/*
 * Cluster-side entry point: run every benchmark for each thread count from
 * 1 up to the number of cluster cores, with a blank line between suites.
 */
void test_entry()
{
    for (int n = 1; n <= pi_cl_cluster_nb_cores(); n++)
        test_barrier(n);
    printf("\n");

    for (int n = 1; n <= pi_cl_cluster_nb_cores(); n++)
        test_critical(n);
    printf("\n");

    for (int n = 1; n <= pi_cl_cluster_nb_cores(); n++)
        test_parallel_loop_static(n);
}
/*
 * Fabric-controller entry: open the cluster, dispatch test_entry() to it as
 * a cluster task, close the cluster, and exit with the accumulated error
 * count. Exits with -1 immediately if the cluster cannot be opened.
 */
void launch_test(void)
{
    printf("Entering main controller\n");
    uint32_t errors = 0;
    struct pi_device cluster_dev = {0};
    struct pi_cluster_conf cl_conf = {0};

    /* Init cluster configuration structure. */
    pi_cluster_conf_init(&cl_conf);
    cl_conf.id = 0;                /* Set cluster ID. */

    /* Configure & open cluster. */
    pi_open_from_conf(&cluster_dev, &cl_conf);
    if (pi_cluster_open(&cluster_dev))
    {
        printf("Cluster open failed !\n");
        pmsis_exit(-1);
    }

    /* Prepare cluster task and send it to cluster. */
    struct pi_cluster_task cl_task = {0};
    cl_task.entry = test_entry;
    cl_task.arg = NULL;
    /* Check the dispatch result instead of silently dropping it, so a failed
     * submission is reported via the exit code. NOTE(review): assumes the
     * PMSIS convention of non-zero return on failure -- confirm against the
     * SDK in use. */
    if (pi_cluster_send_task_to_cl(&cluster_dev, &cl_task))
    {
        printf("Cluster task failed !\n");
        errors++;
    }
    pi_cluster_close(&cluster_dev);
    printf("Test success !\n");
    pmsis_exit(errors);
}
/* Program Entry. */
/* Program Entry: print the banner, then hand control to the PMSIS runtime,
 * which schedules launch_test() on the fabric controller. */
int main(void)
{
    printf("\n\n\t *** OpenMP Benchmark ***\n\n");
    int status = pmsis_kickoff((void *) launch_test);
    return status;
}
|
special_random_ops.h | //
// @author raver119@gmail.com
//
#ifndef LIBND4J_SPECIAL_RANDOM_OPS_H
#define LIBND4J_SPECIAL_RANDOM_OPS_H
#include <ops/random_ops.h>
namespace randomOps {
//////////////////////////////////////////////////////////////////////
// Weighted random selection op: for each output element of Z, draws one
// uniform random value and walks the running cumulative sum of the
// probabilities in Y, copying the matching element of X into Z.
template<typename T>
class Choice {
public:
// method_* are project macros that stamp out the standard transform entry
// points; the actual work is in the special* methods below.
method_idx
method_X
method_XY
static const bool requiresSpecial = true;
#ifdef __CUDACC__
// Device-side implementation. The RNG state is copied into shared memory
// once per block; each thread then samples a grid-strided subset of Z.
__device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
/**
* X holds data,
* Y holds probabilities
* Z will hold results
*/
// TODO: we probably might want to skip this sum, and state that probabilities array should be real probabilities, i.e. should sum to 1.0
//T probSum = extraArguments[0];
__shared__ Nd4jLong xLength;
__shared__ Nd4jLong yLength;
__shared__ Nd4jLong zLength;
__shared__ int xEWS;
__shared__ int yEWS;
__shared__ int zEWS;
// buffer points at the block-local copy of the RNG state in shared memory;
// devBuffer is the global-memory original that gets rewound at the end.
__shared__ nd4j::random::RandomBuffer *buffer;
__shared__ unsigned char *cB;
__shared__ unsigned char *dB;
__shared__ nd4j::random::RandomBuffer *devBuffer;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
buffer = (nd4j::random::RandomBuffer *) shmem;
cB = shmem;
devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
dB = reinterpret_cast<unsigned char *> (state);
xLength = shape::length(xShapeBuffer);
yLength = shape::length(yShapeBuffer);
zLength = shape::length(zShapeBuffer);
xEWS = shape::elementWiseStride(xShapeBuffer);
yEWS = shape::elementWiseStride(yShapeBuffer);
zEWS = shape::elementWiseStride(zShapeBuffer);
}
__syncthreads();
// using this loop instead of memcpy
for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
cB[e] = dB[e];
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) {
// Fast path: every array has a positive element-wise stride.
for (Nd4jLong e = tid; e < zLength; e+=blockDim.x * gridDim.x) {
T prob = buffer->relativeT<T>(e);
T cumProb = (T) 0.0f;
for (Nd4jLong f = 0; f < yLength; f++) {
T relProb = y[f * yEWS];
cumProb += relProb;
if (prob <= cumProb || f == yLength - 1) {
z[e * zEWS] = x[f * xEWS];
// Bumps f past yLength to terminate the scan (acts as a break).
f += yLength;
}
// NOTE(review): __syncthreads() inside loops whose outer trip count can
// differ across threads of a block risks divergence deadlock -- confirm
// all threads reach these barriers the same number of times.
__syncthreads();
}
__syncthreads();
}
} else {
// Slow path: arbitrary strides; offsets computed per element via ind2sub.
Nd4jLong xCoord[MAX_RANK];
Nd4jLong yCoord[MAX_RANK];
Nd4jLong zCoord[MAX_RANK];
__shared__ int xRank;
__shared__ int yRank;
__shared__ int zRank;
__shared__ Nd4jLong *xShape;
__shared__ Nd4jLong *yShape;
__shared__ Nd4jLong *zShape;
__shared__ Nd4jLong *xStride;
__shared__ Nd4jLong *yStride;
__shared__ Nd4jLong *zStride;
if (threadIdx.x == 0) {
xRank = shape::rank(xShapeBuffer);
yRank = shape::rank(yShapeBuffer);
zRank = shape::rank(zShapeBuffer);
xShape = shape::shapeOf(xShapeBuffer);
yShape = shape::shapeOf(yShapeBuffer);
zShape = shape::shapeOf(zShapeBuffer);
xStride = shape::stride(xShapeBuffer);
yStride = shape::stride(yShapeBuffer);
zStride = shape::stride(zShapeBuffer);
}
__syncthreads();
for (Nd4jLong i = tid; i < zLength; i+=blockDim.x * gridDim.x) {
shape::ind2sub(zRank, zShape, i, zCoord);
Nd4jLong zOffset2 = shape::getOffset(0, zShape, zStride, zCoord, zRank);
T prob = buffer->relativeT<T>(i);
T cumProb = (T) 0.0f;
for (Nd4jLong f = 0; f < yLength; f++) {
// NOTE(review): Y is indexed with the output index i here, not the scan
// index f (the fast path uses f) -- confirm which is intended.
shape::ind2sub(yRank, yShape, i, yCoord);
Nd4jLong yOffset2 = shape::getOffset(0, yShape, yStride, yCoord, yRank);
T relProb = y[yOffset2];
cumProb += relProb;
if (prob <= cumProb || f == yLength - 1) {
shape::ind2sub(xRank, xShape, f, xCoord);
Nd4jLong xOffset2 = shape::getOffset(0, xShape, xStride, xCoord, xRank);
z[zOffset2] = x[xOffset2];
f += yLength;
}
__syncthreads();
}
__syncthreads();
}
}
__syncthreads();
// Advance the global RNG state past the values consumed by this kernel.
devBuffer->rewind(zLength);
}
#endif
// Host-side (CPU/OpenMP) implementation of the same sampling scheme.
static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
/**
* X holds data,
* Y holds probabilities
* Z will hold results
*/
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
// TODO: we probably might want to skip this sum, and state that probabilities array should be real probabilities, i.e. should sum to 1.0
//T probSum = extraArguments[0];
Nd4jLong yLength = shape::length(yShapeBuffer);
Nd4jLong zLength = shape::length(zShapeBuffer);
int xEWS = shape::elementWiseStride(xShapeBuffer);
int yEWS = shape::elementWiseStride(yShapeBuffer);
int zEWS = shape::elementWiseStride(zShapeBuffer);
// Scale the thread count with the problem size, capped by OpenMP's limit.
int elementsPerThread = zLength / TAD_THRESHOLD;
int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
_threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());
if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) {
#pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided)
for (Nd4jLong e = 0; e < zLength; e++) {
T prob = buffer->relativeT<T>(e);
T cumProb = (T) 0.0f;
for (Nd4jLong f = 0; f < yLength; f++) {
T relProb = y[f * yEWS];
cumProb += relProb;
if (prob <= cumProb || f == yLength - 1) {
z[e * zEWS] = x[f * xEWS];
// Bumps f past yLength to terminate the scan (acts as a break).
f += yLength;
}
}
}
} else {
Nd4jLong xCoord[MAX_RANK];
Nd4jLong yCoord[MAX_RANK];
Nd4jLong zCoord[MAX_RANK];
int xRank = shape::rank(xShapeBuffer);
int yRank = shape::rank(yShapeBuffer);
int zRank = shape::rank(zShapeBuffer);
auto xShape = shape::shapeOf(xShapeBuffer);
auto yShape = shape::shapeOf(yShapeBuffer);
auto zShape = shape::shapeOf(zShapeBuffer);
auto xStride = shape::stride(xShapeBuffer);
auto yStride = shape::stride(yShapeBuffer);
auto zStride = shape::stride(zShapeBuffer);
// NOTE(review): the coord scratch arrays above are shared across the
// OpenMP threads below -- confirm they are meant to be per-thread.
#pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided)
for (Nd4jLong i = 0; i < zLength; i++) {
shape::ind2sub(zRank, zShape, i, zCoord);
Nd4jLong zOffset2 = shape::getOffset(0, zShape, zStride, zCoord, zRank);
T prob = buffer->relativeT<T>(i);
T cumProb = (T) 0.0f;
for (Nd4jLong f = 0; f < yLength; f++) {
shape::ind2sub(yRank, yShape, i, yCoord);
Nd4jLong yOffset2 = shape::getOffset(0, yShape, yStride, yCoord, yRank);
T relProb = y[yOffset2];
cumProb += relProb;
if (prob <= cumProb || f == yLength - 1) {
shape::ind2sub(xRank, xShape, f, xCoord);
Nd4jLong xOffset2 = shape::getOffset(0, xShape, xStride, xCoord, xRank);
z[zOffset2] = x[xOffset2];
f += yLength;
}
}
}
}
// update rng state
buffer->rewindH(zLength);
}
};
//////////////////////////////////////////////////////////////////////
/**
* This Op produces random values within specified boundaries. Distribuion is Gaussian
*/
// Produces normally distributed values via the Box-Muller transform.
// mean/stddev come from extraArguments[0]/[1]; when Y != Z, a per-element
// mean is read from Y instead of the scalar mean.
template<typename T>
class GaussianDistribution {
public:
// Project macros stamping out the standard transform entry points.
method_XY
method_X
method_idx
static const bool requiresSpecial = true;
#ifdef __CUDACC__
// Device-side Box-Muller: uniforms are staged per-thread in the shared
// scratch tZ so that even/odd element pairs can share one (u0, u1) pair.
__device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
__shared__ T epsilon;
__shared__ T two_pi;
__shared__ Nd4jLong zLength;
__shared__ int zEWS;
__shared__ int yEWS;
__shared__ T mean;
__shared__ T stddev;
__shared__ int step;
__shared__ T *tZ;
__shared__ nd4j::random::RandomBuffer *buffer;
__shared__ unsigned char *cB;
__shared__ unsigned char *dB;
__shared__ nd4j::random::RandomBuffer *devBuffer;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
buffer = (nd4j::random::RandomBuffer *) shmem;
cB = shmem;
devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
dB = reinterpret_cast<unsigned char *> (state);
// Uniform scratch lives right after the RNG copy in shared memory.
tZ = (T *) (shmem + sizeof(nd4j::random::RandomBuffer));
zLength = shape::length(zShapeBuffer);
zEWS = shape::elementWiseStride(zShapeBuffer);
yEWS = shape::elementWiseStride(yShapeBuffer);
// epsilon keeps u0 away from 0 so log(u0) stays finite.
epsilon = (T) 1e-5;
two_pi = (T) 2.0 * (T) 3.14159265358979323846;
mean = extraArguments[0];
stddev = extraArguments[1];
step = (blockDim.x * gridDim.x);
}
__syncthreads();
// using this loop instead of memcpy
for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
cB[e] = dB[e];
}
__syncthreads();
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong e = tid; e < zLength; e += step) {
// we need to get random values
tZ[threadIdx.x] = buffer->relativeT<T>(e, epsilon, (T) 1.0f);
// fix for "next rng value"
if (e + 1 >= zLength && e % 2 == 0) {
tZ[threadIdx.x+1] = buffer->relativeT<T>(e+1, epsilon, (T) 1.0f);
}
T realMean = y == z ? mean : y[e * yEWS];
__syncthreads();
// Even e consumes (tZ[x], tZ[x+1]) with cos; odd e reuses the pair with
// sin. NOTE(review): assumes the shared scratch has at least
// blockDim.x+1 slots and adjacent threads handle adjacent e -- confirm
// against the kernel launch configuration.
if (e % 2 == 0)
z[e *zEWS] = (nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(tZ[threadIdx.x])) * nd4j::math::nd4j_cos<T>(two_pi * tZ[threadIdx.x+1])) * stddev + realMean;
else
z[e *zEWS] = (nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(tZ[threadIdx.x-1])) * nd4j::math::nd4j_sin<T>(two_pi * tZ[threadIdx.x])) * stddev + realMean;
__syncthreads();
}
__syncthreads();
// Advance the global RNG state past the values consumed here.
devBuffer->rewind(zLength);
}
#endif
// Host-side Box-Muller. Each OpenMP thread works an even-sized chunk so
// every chunk starts on a fresh (z0, z1) pair.
static inline void
specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
const T two_pi = (T) 2.0 * 3.14159265358979323846;
auto zLength = shape::length(zShapeBuffer);
auto yEWS = shape::elementWiseStride(yShapeBuffer);
auto zEWS = shape::elementWiseStride(zShapeBuffer);
int elementsPerThread = zLength / TAD_THRESHOLD;
int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
_threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());
int span = (zLength / _threads) + 8;
// we're enforcing even chunks, since it's mandatory for this algorithm
span -= span % 2;
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
T mean = extraArguments[0];
T stddev = extraArguments[1];
#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
{
int tid = omp_get_thread_num();
Nd4jLong start = span * tid;
Nd4jLong end = span * (tid + 1);
if (end > zLength) end = zLength;
T z0, z1;
T u0, u1;
T lnU0;
// Tracks whether the second half (z1) of the current Box-Muller pair is
// still available for the next element.
bool generated = false;
for (Nd4jLong e = start; e < end; e++) {
if (!generated) {
/*
* Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries
*/
u0 = buffer->relativeT<T>(e, (T) 1e-5f, (T) 1.0f);
u1 = buffer->relativeT<T>((e + 1), (T) 1e-5f, (T) 1.0f);
lnU0 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0));
z0 = lnU0 * nd4j::math::nd4j_cos<T>(two_pi * u1);
z1 = lnU0 * nd4j::math::nd4j_sin<T>(two_pi * u1);
generated = true;
T realMean = y == z ? mean : y[e * yEWS];
z[e * zEWS] = z0 * stddev + realMean;
} else {
T realMean = y == z ? mean : y[e * yEWS];
z[e * zEWS] = z1 * stddev + realMean;
generated = false;
}
}
}
// update rng state
buffer->rewindH(zLength);
}
};
//////////////////////////////////////////////////////////////////////
/**
* This Op produces random values within [0..N], Distribuion is binomial
*/
// Binomial sampler: for each output element, counts successes over `trials`
// Bernoulli draws. trials = extraArguments[0], scalar probability =
// extraArguments[1]; when Y != Z the per-TRIAL probability is read from
// y[(t-1)] instead (contrast with BinomialDistributionEx, which reads a
// per-ELEMENT probability).
template<typename T>
class BinomialDistribution {
public:
// Project macros stamping out the standard transform entry points.
method_XY
method_X
method_idx
static const bool requiresSpecial = true;
#ifdef __CUDACC__
// Device-side implementation; RNG state copied to shared memory per block.
__device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
int trials = (int) extraArguments[0];
T prob = extraArguments[1];
__shared__ Nd4jLong zLength;
__shared__ int yEWS;
__shared__ int zEWS;
__shared__ nd4j::random::RandomBuffer *buffer;
__shared__ unsigned char *cB;
__shared__ unsigned char *dB;
__shared__ nd4j::random::RandomBuffer *devBuffer;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
buffer = (nd4j::random::RandomBuffer *) shmem;
cB = shmem;
devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
dB = reinterpret_cast<unsigned char *> (state);
zLength = shape::length(zShapeBuffer);
yEWS = shape::elementWiseStride(yShapeBuffer);
zEWS = shape::elementWiseStride(zShapeBuffer);
}
__syncthreads();
// using this loop instead of memcpy
for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
cB[e] = dB[e];
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong e = tid; e < zLength; e += blockDim.x * gridDim.x) {
int success = 0;
for (int t = 1; t <= trials; t++) {
// Index (e+1)*t decorrelates draws across elements and trials.
T randVal = buffer->relativeT<T>((e+1) * t);
if (y != z) {
// we're using external probs
prob = y[(t-1) * yEWS];
}
if (randVal < prob)
success++;
}
// we need this, to eliminate excessive code branching in runtime
__syncthreads();
// if trials is set to 0, effectively we just have successful memset
z[e * zEWS] = (T) success;
}
__syncthreads();
if (trials > 0)
devBuffer->rewind(zLength * trials);
}
#endif
// Host-side implementation: elements are split into fixed spans per thread.
static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
int trials = (int) extraArguments[0];
Nd4jLong zLength = shape::length(zShapeBuffer);
auto yEWS = shape::elementWiseStride(yShapeBuffer);
auto zEWS = shape::elementWiseStride(zShapeBuffer);
int elementsPerThread = zLength / TAD_THRESHOLD;
int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
_threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());
int span = (zLength / _threads) + 8;
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
{
int tid = omp_get_thread_num();
Nd4jLong start = span * tid;
Nd4jLong end = span * (tid + 1);
if (end > zLength) end = zLength;
T prob = extraArguments[1];
for (Nd4jLong e = start; e < end; e++) {
int success = 0;
for (int t = 1; t <= trials; t++) {
T randVal = buffer->relativeT<T>((e+1) * t);
if (y != z) {
// we're using external probs
prob = y[(t-1) * yEWS];
}
if (randVal < prob)
success++;
}
// if trials is set to 0, effectively we just have successful memset
z[e * zEWS] = (T) success;
}
}
// update rng state
if (trials > 0)
buffer->rewindH(zLength * trials);
}
};
//////////////////////////////////////////////////////////////////////
/**
* This Op produces random values within [0..N], Distribuion is binomial
*/
// Binomial sampler variant: identical to BinomialDistribution except that
// when Y != Z the external probability is read per OUTPUT ELEMENT
// (y[e * yEWS]) rather than per trial.
template<typename T>
class BinomialDistributionEx {
public:
// Project macros stamping out the standard transform entry points.
method_XY
method_X
method_idx
static const bool requiresSpecial = true;
#ifdef __CUDACC__
// Device-side implementation; RNG state copied to shared memory per block.
__device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
int trials = (int) extraArguments[0];
T prob = extraArguments[1];
__shared__ Nd4jLong zLength;
__shared__ int yEWS;
__shared__ int zEWS;
__shared__ nd4j::random::RandomBuffer *buffer;
__shared__ unsigned char *cB;
__shared__ unsigned char *dB;
__shared__ nd4j::random::RandomBuffer *devBuffer;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
buffer = (nd4j::random::RandomBuffer *) shmem;
cB = shmem;
devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
dB = reinterpret_cast<unsigned char *> (state);
zLength = shape::length(zShapeBuffer);
yEWS = shape::elementWiseStride(yShapeBuffer);
zEWS = shape::elementWiseStride(zShapeBuffer);
}
__syncthreads();
// using this loop instead of memcpy
for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
cB[e] = dB[e];
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong e = tid; e < zLength; e += blockDim.x * gridDim.x) {
int success = 0;
for (int t = 1; t <= trials; t++) {
// Index (e+1)*t decorrelates draws across elements and trials.
T randVal = buffer->relativeT<T>((e+1) * t);
if (y != z) {
// we're using external probs: per-element here (the Ex difference)
prob = y[e * yEWS];
}
if (randVal < prob)
success++;
}
// we need this, to eliminate excessive code branching in runtime
__syncthreads();
// if trials is set to 0, effectively we just have successful memset
z[e * zEWS] = (T) success;
}
__syncthreads();
if (trials > 0)
devBuffer->rewind(zLength * trials);
}
#endif
// Host-side implementation: elements are split into fixed spans per thread.
static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
int trials = (int) extraArguments[0];
Nd4jLong zLength = shape::length(zShapeBuffer);
int yEWS = shape::elementWiseStride(yShapeBuffer);
int zEWS = shape::elementWiseStride(zShapeBuffer);
int elementsPerThread = zLength / TAD_THRESHOLD;
int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
_threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());
int span = (zLength / _threads) + 8;
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
{
int tid = omp_get_thread_num();
Nd4jLong start = span * tid;
Nd4jLong end = span * (tid + 1);
if (end > zLength) end = zLength;
T prob = extraArguments[1];
for (Nd4jLong e = start; e < end; e++) {
int success = 0;
for (int t = 1; t <= trials; t++) {
T randVal = buffer->relativeT<T>((e+1) * t);
if (y != z) {
// we're using external probs: per-element here (the Ex difference)
prob = y[e * yEWS];
}
if (randVal < prob)
success++;
}
// if trials is set to 0, effectively we just have successful memset
z[e * zEWS] = (T) success;
}
}
// update rng state
if (trials > 0)
buffer->rewindH(zLength * trials);
}
};
//////////////////////////////////////////////////////////////////////
// This Op produces random Gaussian values within [mean-2*stddev,mean+2*stddev]
// Truncated Gaussian: Box-Muller samples are rejection-resampled until both
// halves of each pair land within [mean - 2*stddev, mean + 2*stddev].
// Elements are processed in (e, e + middle) pairs so one uniform pair yields
// two outputs.
template<typename T>
class TruncatedNormalDistribution {
public:
// Project macros stamping out the standard transform entry points.
method_XY
method_X
method_idx
static const bool requiresSpecial = true;
#ifdef __CUDACC__
// Device-side implementation; RNG state copied to shared memory per block.
__device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
__shared__ T epsilon;
__shared__ T two_pi;
__shared__ Nd4jLong zLength;
__shared__ int zEWS;
__shared__ int yEWS;
__shared__ T mean;
__shared__ T stddev;
__shared__ int step;
__shared__ T *tZ;
__shared__ nd4j::random::RandomBuffer *buffer;
__shared__ unsigned char *cB;
__shared__ unsigned char *dB;
__shared__ nd4j::random::RandomBuffer *devBuffer;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
buffer = (nd4j::random::RandomBuffer *) shmem;
cB = shmem;
devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
dB = reinterpret_cast<unsigned char *> (state);
tZ = (T *) (shmem + sizeof(nd4j::random::RandomBuffer));
zLength = shape::length(zShapeBuffer);
zEWS = shape::elementWiseStride(zShapeBuffer);
yEWS = shape::elementWiseStride(yShapeBuffer);
// epsilon keeps u0 away from 0 so log(u0) stays finite.
epsilon = (T) 1e-6f;
two_pi = (T) 2.0 * (T) 3.14159265358979323846;
mean = extraArguments[0];
stddev = extraArguments[1];
step = (blockDim.x * gridDim.x);
}
__syncthreads();
// using this loop instead of memcpy
for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
cB[e] = dB[e];
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1;
T result0, result1, u0, u1, z0, z1;
// ds is the acceptance band half-width criterion (2 * |stddev|).
T ds = nd4j::math::nd4j_abs<T>(stddev) * (T) 2.0f;
for (Nd4jLong e = tid; e < middle; e += step) {
// we need to get random values
// generation0 offsets the RNG index on each resampling round.
Nd4jLong generation0 = 0;
T realMean0 = y == z ? mean : y[e * yEWS];
// NOTE(review): this read of y[(e + middle) * yEWS] is not guarded the
// way the corresponding write below is -- for odd zLength the last pair
// may read one element past Y; confirm Y's length.
T realMean1 = y == z ? mean : y[(e + middle) * yEWS];
do {
// NOTE(review): these local u0/u1 shadow the outer declarations above,
// leaving the outer ones unused -- presumably unintentional shadowing.
T u0 = buffer->relativeT<T>(e + generation0, epsilon, (T) 1.0f);
T u1 = buffer->relativeT<T>(e + middle + generation0, epsilon, (T) 1.0f);
z0 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0)) * nd4j::math::nd4j_cos<T>(two_pi * u1);
z1 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0)) * nd4j::math::nd4j_sin<T>(two_pi * u1);
result0 = z0 * stddev + realMean0;
result1 = z1 * stddev + realMean1;
generation0 += zLength;
} while (nd4j::math::nd4j_abs<T>(realMean0) + nd4j::math::nd4j_abs<T>(result0) > ds || nd4j::math::nd4j_abs<T>(realMean1) + nd4j::math::nd4j_abs<T>(result1) > ds);
z[e*zEWS] = result0;
if((e+middle) < zLength)
z[(e + middle) * zEWS] = result1;
}
__syncthreads();
devBuffer->rewind(zLength);
}
#endif
// Host-side implementation over the same pairing/rejection scheme.
static inline void
specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
const T two_pi = (T) 2.0 * 3.14159265358979323846;
Nd4jLong zLength = shape::length(zShapeBuffer);
auto yEWS = shape::elementWiseStride(yShapeBuffer);
auto zEWS = shape::elementWiseStride(zShapeBuffer);
auto middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1;
int elementsPerThread = middle / TAD_THRESHOLD;
int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
_threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());
int span = (middle / _threads) + 8;
// we're enforcing even chunks, since it's mandatory for this algorithm
span -= span % 2;
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
T mean = extraArguments[0];
T stddev = extraArguments[1];
#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
{
int tid = omp_get_thread_num();
Nd4jLong start = span * tid;
Nd4jLong end = span * (tid + 1);
if (end > middle) {
end = middle;
}
T z0, z1;
T u0, u1;
T result0, result1, lnu0;
// Acceptance band criterion: 2 * |stddev|.
T ds = nd4j::math::nd4j_abs<T>(stddev) * (T) 2.0f;
for (Nd4jLong e = start; e < end; e++) {
/*
* Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries
*/
Nd4jLong generation0 = 0;
T realMean0 = y == z ? mean : y[e * yEWS];
// NOTE(review): unguarded read of y[(e + middle) * yEWS] -- see the CUDA
// variant's note; for odd zLength this may read one element past Y.
T realMean1 = y == z ? mean : y[(e + middle) * yEWS];
do {
u0 = buffer->relativeT<T>(e + generation0, (T) 1e-6f, (T) 1.0f);
u1 = buffer->relativeT<T>((e + middle + generation0), (T) 1e-6f, (T) 1.0f);
lnu0 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0));
z0 = lnu0 * nd4j::math::nd4j_cos<T>(two_pi * u1);
z1 = lnu0 * nd4j::math::nd4j_sin<T>(two_pi * u1);
result0 = z0 * stddev + realMean0;
result1 = z1 * stddev + realMean1;
generation0 += zLength;
} while (nd4j::math::nd4j_abs<T>(realMean0) + nd4j::math::nd4j_abs<T>(result0) > ds || nd4j::math::nd4j_abs<T>(realMean1) + nd4j::math::nd4j_abs<T>(result1) > ds);
z[e*zEWS] = result0;
if((e+middle) < zLength)
z[(e + middle) * zEWS] = result1;
}
}
// update rng state
buffer->rewindH(zLength);
}
};
//////////////////////////////////////////////////////////////////////
// This Op produces random Log-normal distribution
// Log-normal sampler: exp() of a Box-Muller Gaussian with the given
// mean/stddev (extraArguments[0]/[1], or per-element means from Y when
// Y != Z). Structure mirrors GaussianDistribution with exp() applied.
template<typename T>
class LogNormalDistribution {
public:
// Project macros stamping out the standard transform entry points.
method_XY
method_X
method_idx
static const bool requiresSpecial = true;
#ifdef __CUDACC__
// Device-side implementation; uniforms staged in the shared scratch tZ so
// even/odd element pairs share one (u0, u1) pair.
__device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
__shared__ T epsilon;
__shared__ T two_pi;
__shared__ Nd4jLong zLength;
__shared__ int zEWS;
__shared__ int yEWS;
__shared__ T mean;
__shared__ T stddev;
__shared__ int step;
__shared__ T *tZ;
__shared__ nd4j::random::RandomBuffer *buffer;
__shared__ unsigned char *cB;
__shared__ unsigned char *dB;
__shared__ nd4j::random::RandomBuffer *devBuffer;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
buffer = (nd4j::random::RandomBuffer *) shmem;
cB = shmem;
devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
dB = reinterpret_cast<unsigned char *> (state);
tZ = (T *) (shmem + sizeof(nd4j::random::RandomBuffer));
zLength = shape::length(zShapeBuffer);
zEWS = shape::elementWiseStride(zShapeBuffer);
yEWS = shape::elementWiseStride(yShapeBuffer);
// epsilon keeps u0 away from 0 so log(u0) stays finite.
epsilon = (T) 1e-5;
two_pi = (T) 2.0 * (T) 3.14159265358979323846;
mean = extraArguments[0];
stddev = extraArguments[1];
step = (blockDim.x * gridDim.x);
}
__syncthreads();
// using this loop instead of memcpy
for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
cB[e] = dB[e];
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong e = tid; e < zLength; e += step) {
// we need to get random values
tZ[threadIdx.x] = buffer->relativeT<T>(e, epsilon, (T) 1.0f);
// fix for "next rng value"
if (e + 1 >= zLength && e % 2 == 0) {
tZ[threadIdx.x+1] = buffer->relativeT<T>(e+1, epsilon, (T) 1.0f);
}
T realMean = y == z ? mean : y[e * yEWS];
__syncthreads();
// Even e consumes (tZ[x], tZ[x+1]) with cos; odd e reuses the pair with
// sin. NOTE(review): same shared-scratch bounds assumption as
// GaussianDistribution -- confirm against the kernel launch config.
if (e % 2 == 0)
z[e *zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(tZ[threadIdx.x])) * nd4j::math::nd4j_cos<T>(two_pi * tZ[threadIdx.x+1])) * stddev + realMean);
else
z[e *zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(tZ[threadIdx.x-1])) * nd4j::math::nd4j_sin<T>(two_pi * tZ[threadIdx.x])) * stddev + realMean);
__syncthreads();
}
__syncthreads();
devBuffer->rewind(zLength);
}
#endif
// Host-side implementation: even-sized per-thread chunks so each chunk
// starts on a fresh Box-Muller pair; exp() applied to each Gaussian draw.
static inline void
specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
const T two_pi = (T) 2.0 * 3.14159265358979323846;
Nd4jLong zLength = shape::length(zShapeBuffer);
auto yEWS = shape::elementWiseStride(yShapeBuffer);
auto zEWS = shape::elementWiseStride(zShapeBuffer);
int elementsPerThread = zLength / TAD_THRESHOLD;
int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
_threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());
int span = (zLength / _threads) + 8;
// we're enforcing even chunks, since it's mandatory for this algorithm
span -= span % 2;
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
T mean = extraArguments[0];
T stddev = extraArguments[1];
#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
{
int tid = omp_get_thread_num();
Nd4jLong start = span * tid;
Nd4jLong end = span * (tid + 1);
if (end > zLength) end = zLength;
T z0, z1;
T u0, u1;
T lnU0;
// Tracks whether the second half (z1) of the current pair is pending.
bool generated = false;
for (Nd4jLong e = start; e < end; e++) {
if (!generated) {
/*
* Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries
*/
u0 = buffer->relativeT<T>(e, (T) 1e-5f, (T) 1.0f);
u1 = buffer->relativeT<T>((e + 1), (T) 1e-5f, (T) 1.0f);
lnU0 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0));
z0 = lnU0 * nd4j::math::nd4j_cos<T>(two_pi * u1);
z1 = lnU0 * nd4j::math::nd4j_sin<T>(two_pi * u1);
generated = true;
T realMean = y == z ? mean : y[e * yEWS];
z[e * zEWS] = nd4j::math::nd4j_exp<T>(z0 * stddev + realMean);
} else {
T realMean = y == z ? mean : y[e * yEWS];
z[e * zEWS] = nd4j::math::nd4j_exp<T>(z1 * stddev + realMean);
generated = false;
}
}
}
// update rng state
buffer->rewindH(zLength);
}
};
}
#endif //LIBND4J_SPECIAL_RANDOM_OPS_H
|
mandelbrot-9.c | // The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// Contributed by Jeremy Zerfas
// This is the square of the limit that pixels will need to exceed in order to
// escape from the Mandelbrot set.
#define LIMIT_SQUARED 4.0
// This controls the maximum amount of iterations that are done for each pixel.
#define MAXIMUM_ITERATIONS 50
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
// intptr_t should be the native integer type on most sane systems.
typedef intptr_t intnative_t;
// Renders the Mandelbrot set as a 1-bit PBM (P4) image of the size given on
// the command line (rounded up to a multiple of 8) and writes it to stdout.
int main(int argc, char ** argv){
   // Guard the argv[1] access: the original dereferenced it unconditionally,
   // which is undefined behavior when the size argument is missing.
   if(argc<2){
      fprintf(stderr, "usage: %s <image_size>\n", argv[0]);
      return 1;
   }

   // Ensure image_Width_And_Height are multiples of 8.
   const intnative_t image_Width_And_Height=(atoi(argv[1])+7)/8*8;

   // The image will be black and white with one bit for each pixel. Bits with
   // a value of zero are white pixels which are the ones that "escape" from
   // the Mandelbrot set. We'll be working on one line at a time and each line
   // will be made up of pixel groups that are eight pixels in size so each
   // pixel group will be one byte. This allows for some more optimizations to
   // be done.
   uint8_t * const pixels=malloc(image_Width_And_Height*
     image_Width_And_Height/8);
   // Check the allocation instead of dereferencing a potential NULL below.
   if(!pixels){
      fprintf(stderr, "out of memory\n");
      return 1;
   }

   // Precompute the initial real and imaginary values for each x and y
   // coordinate in the image.
   double initial_r[image_Width_And_Height], initial_i[image_Width_And_Height];
   #pragma omp parallel for
   for(intnative_t xy=0; xy<image_Width_And_Height; xy++){
      initial_r[xy]=2.0/image_Width_And_Height*xy - 1.5;
      initial_i[xy]=2.0/image_Width_And_Height*xy - 1.0;
   }

   #pragma omp parallel for schedule(guided)
   for(intnative_t y=0; y<image_Width_And_Height; y++){
      const double prefetched_Initial_i=initial_i[y];
      for(intnative_t x_Major=0; x_Major<image_Width_And_Height; x_Major+=8){

         // pixel_Group_r and pixel_Group_i will store real and imaginary
         // values for each pixel in the current pixel group as we perform
         // iterations. Set their initial values here.
         double pixel_Group_r[8], pixel_Group_i[8];
         for(intnative_t x_Minor=0; x_Minor<8; x_Minor++){
            pixel_Group_r[x_Minor]=initial_r[x_Major+x_Minor];
            pixel_Group_i[x_Minor]=prefetched_Initial_i;
         }

         // Assume all pixels are in the Mandelbrot set initially.
         uint8_t eight_Pixels=0xff;

         intnative_t iteration=MAXIMUM_ITERATIONS;
         do{
            uint8_t current_Pixel_Bitmask=0x80;
            for(intnative_t x_Minor=0; x_Minor<8; x_Minor++){
               const double r=pixel_Group_r[x_Minor];
               const double i=pixel_Group_i[x_Minor];

               pixel_Group_r[x_Minor]=r*r - i*i +
                 initial_r[x_Major+x_Minor];
               pixel_Group_i[x_Minor]=2.0*r*i + prefetched_Initial_i;

               // Clear the bit for the pixel if it escapes from the
               // Mandelbrot set.
               if(r*r + i*i>LIMIT_SQUARED)
                  eight_Pixels&=~current_Pixel_Bitmask;

               current_Pixel_Bitmask>>=1;
            }
         }while(eight_Pixels && --iteration);

         pixels[y*image_Width_And_Height/8 + x_Major/8]=eight_Pixels;
      }
   }

   // Output the image to stdout.
   fprintf(stdout, "P4\n%jd %jd\n", (intmax_t)image_Width_And_Height,
     (intmax_t)image_Width_And_Height);
   fwrite(pixels, image_Width_And_Height*image_Width_And_Height/8, 1, stdout);

   free(pixels);
   return 0;
}
|
krb5pa-sha1_fmt_plug.c | /*
* Kerberos 5 "PA ENC TIMESTAMP" by magnum (modified by Dhiru)
*
* Pcap file -> input file:
* 1. tshark -r capture.pcapng -T pdml > ~/capture.pdml
* 2. krbng2john.py ~/capture.pdml > krb5.in
* 3. Run john on krb5.in
*
* http://www.ietf.org/rfc/rfc4757.txt
* http://www.securiteam.com/windowsntfocus/5BP0H0A6KM.html
*
* Input format is 'user:$krb5pa$etype$user$realm$salt$timestamp+checksum'
*
* NOTE: Checksum implies last 12 bytes of PA_ENC_TIMESTAMP value in AS-REQ
* packet.
*
* Default Salt: realm + user
*
* AES-256 encryption & decryption of AS-REQ timestamp in Kerberos v5
* See the following RFC for more details about the crypto & algorithms used:
*
* RFC3961 - Encryption and Checksum Specifications for Kerberos 5
* RFC3962 - Advanced Encryption Standard (AES) Encryption for Kerberos 5
*
* march 09 / kevin devine <wyse101 0x40 gmail.com>
*
* This software is Copyright (c) 2011 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* This software is Copyright (c) 2012 Dhiru Kholia (dhiru at openwall.com) and
* released under same terms as above
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_krb5pa;
#elif FMT_REGISTERS_H
john_register_one(&fmt_krb5pa);
#else
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "formats.h"
#include "options.h"
#include "common.h"
#include "unicode.h"
#include "johnswap.h"
#include "aes.h"
#include "hmac_sha.h"
#include "pbkdf2_hmac_sha1.h"
#include "loader.h"
#include "memdbg.h"
#define FORMAT_LABEL "krb5pa-sha1"
#define FORMAT_NAME "Kerberos 5 AS-REQ Pre-Auth etype 17/18" /* aes-cts-hmac-sha1-96 */
#define FORMAT_TAG "$krb5pa$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 12 /* HMAC-SHA1-96 truncated checksum */
#define BINARY_ALIGN 4
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define MAX_SALTLEN 128
#define MAX_REALMLEN 64
#define MAX_USERLEN 64
#define TIMESTAMP_SIZE 44 /* decrypted PA-ENC-TIMESTAMP, in bytes */
#define CHECKSUM_SIZE BINARY_SIZE
/* Worst-case canonical hash length produced by split() */
#define TOTAL_LENGTH (14 + 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) + MAX_REALMLEN + MAX_USERLEN + MAX_SALTLEN)
/* Self-test vectors: '$krb5pa$etype$user$realm$salt$timestamp+checksum'.
 * An empty salt field means realm+user is used as the salt. */
static struct fmt_tests tests[] = {
	{"$krb5pa$18$user1$EXAMPLE.COM$$2a0e68168d1eac344da458599c3a2b33ff326a061449fcbc242b212504e484d45903c6a16e2d593912f56c93883bf697b325193d62a8be9c", "openwall"},
	{"$krb5pa$18$user1$EXAMPLE.COM$$a3918bd0381107feedec8db0022bdf3ac56e534ed54d13c62a7013a47713cfc31ef4e7e572f912fa4164f76b335e588bf29c2d17b11c5caa", "openwall"},
	{"$krb5pa$18$l33t$EXAMPLE.COM$$98f732b309a1d7ef2355a974842a32894d911e97150f5d57f248e1c2632fbd3735c5f156532ccae0341e6a2d779ca83a06021fe57dafa464", "openwall"},
	{"$krb5pa$18$aduser$AD.EXAMPLE.COM$$64dfeee04be2b2e0423814e0df4d0f960885aca4efffe6cb5694c4d34690406071c4968abd2c153ee42d258c5e09a41269bbcd7799f478d3", "password@123"},
	{"$krb5pa$18$aduser$AD.EXAMPLE.COM$$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"},
	{"$krb5pa$18$aduser$AD.EXAMPLE.COM$AD.EXAMPLE.COMaduser$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"},
	/* etype 17 hash obtained using MiTM etype downgrade attack */
	{"$krb5pa$17$user1$EXAMPLE.COM$$c5461873dc13665771b98ba80be53939e906d90ae1ba79cf2e21f0395e50ee56379fbef4d0298cfccfd6cf8f907329120048fd05e8ae5df4", "openwall"},
	{NULL},
};
/* Per-candidate buffers; allocated in init(), scaled for OpenMP/SIMD. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* Parsed representation of one '$krb5pa$...' hash. */
static struct custom_salt {
	int etype;                /* 17 (AES-128) or 18 (AES-256) */
	unsigned char realm[64];
	unsigned char user[64];
	unsigned char salt[128];  /* realm + user */
	unsigned char ct[44];     /* encrypted PA-ENC-TIMESTAMP (checksum excluded) */
} *cur_salt;
/* Key-derivation constants; computed once by nfold() in init(). */
static unsigned char constant[16];
static unsigned char ke_input[16];
static unsigned char ki_input[16];
/* n-fold(k-bits), as specified by RFC 3961 section 5.1:
 * l = lcm(n,k)
 * r = l/k
 * s = k-bits | k-bits rot 13 | k-bits rot 13*2 | ... | k-bits rot 13*(r-1)
 * compute the 1's complement sum:
 * n-fold = s[0..n-1]+s[n..2n-1]+s[2n..3n-1]+..+s[(k-1)*n..k*n-1] */
/* representation: msb first, assume n and k are multiples of 8, and
 * that k>=16. this is the case of all the cryptosystems which are
 * likely to be used. this function can be replaced if that
 * assumption ever fails. */
/* inbits/outbits are lengths in BITS; out receives outbits/8 bytes.
 * Fix over the original: a stray '\' line continuation after the
 * carry-loop's "byte >>= 8;" spliced the closing brace onto that
 * statement line; removed. */
static void nfold(unsigned int inbits, const unsigned char *in,
                  unsigned int outbits, unsigned char *out)
{
	int a, b, c, lcm;
	int byte, i, msbit;

	/* the code below is more readable if I make these bytes
	 * instead of bits */
	inbits >>= 3;
	outbits >>= 3;

	/* first compute lcm(n,k): Euclid's gcd, then lcm = n*k/gcd */
	a = outbits;
	b = inbits;
	while (b != 0) {
		c = b;
		b = a % b;
		a = c;
	}
	lcm = outbits * inbits / a;

	/* now do the real work */
	memset(out, 0, outbits);
	byte = 0; /* running sum; top bits hold the carry between steps */

	/* this will end up cycling through k lcm(k,n)/k times, which
	 * is correct */
	for (i = lcm - 1; i >= 0; i--) {
		/* compute the msbit in k which gets added into this byte */
		msbit = (/* first, start with the msbit in the first, unrotated byte */
		         ((inbits << 3) - 1)
		         /* then, for each byte, shift to the right for each
		          * repetition */
		         + (((inbits << 3) + 13) * (i / inbits))
		         /* last, pick out the correct byte within that
		          * shifted repetition */
		         + ((inbits - (i % inbits)) << 3)
		        ) % (inbits << 3);

		/* pull out the byte value itself */
		byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8) |
		          (in[((inbits) - (msbit >> 3)) % inbits]))
		         >> ((msbit & 7) + 1)) & 0xff;

		/* do the addition */
		byte += out[i % outbits];
		out[i % outbits] = byte & 0xff;

		/* keep around the carry bit, if any */
		byte >>= 8;
	}

	/* if there's a carry bit left over, add it back in */
	if (byte) {
		for (i = outbits - 1; i >= 0; i--) {
			/* do the addition */
			byte += out[i];
			out[i] = byte & 0xff;
			/* keep around the carry bit, if any */
			byte >>= 8;
		}
	}
}
/* One-time format setup: scale/allocate the per-candidate buffers and
 * precompute the three RFC 3961 n-fold derivation constants used by
 * crypt_all(). */
static void init(struct fmt_main *self)
{
	unsigned char usage[5];
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
	// generate 128 bits from the 64-bit (8-byte) string "kerberos"
	nfold(8 * 8, (unsigned char*)"kerberos", 128, constant);
	memset(usage,0,sizeof(usage));
	usage[3] = 0x01; // key number in big-endian format
	usage[4] = 0xAA; // used to derive Ke
	nfold(sizeof(usage)*8,usage,sizeof(ke_input)*8,ke_input);
	memset(usage,0,sizeof(usage));
	usage[3] = 0x01; // key number in big-endian format
	usage[4] = 0x55; // used to derive Ki
	nfold(sizeof(usage)*8,usage,sizeof(ki_input)*8,ki_input);
}
/* Tear-down counterpart of init(): release the per-candidate buffers. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Validate one '$krb5pa$etype$user$realm$salt$hex' line.
 * Returns 1 iff the tag, etype (17/18), field lengths, salt length and
 * hex payload length all check out. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *data = ciphertext;
	int type, saltlen = 0;

	// tag is mandatory
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	data += FORMAT_TAG_LEN;

	// etype field, 17 or 18
	p = strchr(data, '$');
	if (!p || p - data != 2)
		return 0;
	type = atoi(data);
	if (type < 17 || type > 18)
		return 0;
	data = p + 1;

	// user field
	p = strchr(data, '$');
	if (!p || p - data > MAX_USERLEN)
		return 0;
	saltlen += p - data;
	data = p + 1;

	// realm field
	p = strchr(data, '$');
	if (!p || p - data > MAX_REALMLEN)
		return 0;
	saltlen += p - data;
	data = p + 1;

	// salt field
	p = strchr(data, '$');
	if (!p)
		return 0;
	// if salt is empty, realm.user is used instead
	if (p - data)
		saltlen = p - data;
	data = p + 1;

	// Reject salts longer than MAX_SALTLEN; warn once, except while
	// loading an existing .pot file.
	if(saltlen > MAX_SALTLEN) {
		static int warned = 0;

		if (!ldr_in_pot)
		if (!warned++)
			fprintf(stderr, "%s: One or more hashes rejected due to salt length limitation\n", FORMAT_LABEL);

		return 0;
	}

	// 56 bytes (112 hex chars) encrypted timestamp + checksum
	if (strlen(data) != 2 * (TIMESTAMP_SIZE + CHECKSUM_SIZE) ||
	    strspn(data, HEXCHARS_all) != strlen(data))
		return 0;

	return 1;
}
/* Parse a canonical hash into a custom_salt. Returns a pointer to a
 * static struct (the caller copies SALT_SIZE bytes out of it).
 * NOTE(review): the p[-1] == '$' tests assume strtokm() leaves p just
 * past the delimiter of a non-empty token, so p[-1] being '$' signals
 * that the preceding field was empty — confirm against strtokm(). */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;
	p = strtokm(ctcopy, "$");
	cs.etype = atoi(p);
	p = strtokm(NULL, "$");
	if (p[-1] == '$')
		cs.user[0] = 0;
	else {
		strcpy((char*)cs.user, p);
		p = strtokm(NULL, "$");
	}
	if (p[-1] == '$')
		cs.realm[0] = 0;
	else {
		strcpy((char*)cs.realm, p);
		p = strtokm(NULL, "$");
	}
	if (p[-1] == '$') {
		/* Empty salt field: default to realm concatenated with user. */
		strcpy((char*)cs.salt, (char*)cs.realm);
		strcat((char*)cs.salt, (char*)cs.user);
	} else {
		strcpy((char*)cs.salt, p);
		p = strtokm(NULL, "$");
	}
	/* Decode the first TIMESTAMP_SIZE bytes of the trailing hex blob
	 * (the encrypted timestamp; the 12-byte checksum is handled by
	 * get_binary()). */
	for (i = 0; i < TIMESTAMP_SIZE; i++)
		cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Canonicalize a hash line: substitute the default salt (realm+user)
 * when the salt field is empty and lowercase the hex payload, so that
 * equivalent hashes compare equal. Returns a static buffer. */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[TOTAL_LENGTH + 1];
	char in[TOTAL_LENGTH + 1];
	char salt[MAX_SALTLEN + 1];
	char *data;
	char *e, *u, *r, *s, *tc;

	strnzcpy(in, ciphertext, sizeof(in));

	/* Carve fields from the right; each *p++ = 0 cuts at the last '$'. */
	tc = strrchr(in, '$'); *tc++ = 0;  /* timestamp+checksum hex */
	s = strrchr(in, '$'); *s++ = 0;    /* salt (possibly empty)   */
	r = strrchr(in, '$'); *r++ = 0;    /* realm                   */
	u = strrchr(in, '$'); *u++ = 0;    /* user                    */
	e = in + 8;                        /* etype, after "$krb5pa$" */

	/* Default salt is realm followed by user */
	if (!*s) {
		snprintf(salt, sizeof(salt), "%s%s", r, u);
		s = salt;
	}
	snprintf(out, sizeof(out), "%s%s$%s$%s$%s$%s", FORMAT_TAG, e, u, r, s, tc);
	/* Lowercase from the '$' preceding the hex blob to the end. */
	data = out + strlen(out) - 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) - 1;
	strlwr(data);
	return out;
}
/* Decode the 12-byte (BINARY_SIZE) HMAC checksum that follows the
 * 2*TIMESTAMP_SIZE hex chars of encrypted timestamp. Returns a static,
 * word-aligned buffer. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy; /* forces alignment for the hash functions */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1 + TIMESTAMP_SIZE * 2; /* skip to checksum field */
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}
/* Return the candidate password previously stored by set_key(). */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Partial-hash accessors: successively wider slices of the first
 * 32-bit word of the computed checksum, for the cracker's hash tables. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Select the salt that subsequent crypt_all() calls will use. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* AES CBC with ciphertext stealing (CTS): handles messages that are not
 * a multiple of AES_BLOCK_SIZE by swapping the last two blocks, so no
 * padding is transmitted. encryptp selects encryption vs decryption.
 * NOTE(review): assumes len > AES_BLOCK_SIZE (at least two blocks'
 * worth of data); callers here pass 44 bytes. */
static void
AES_cts_encrypt(const unsigned char *in, unsigned char *out,
                size_t len, const AES_KEY *key,
                unsigned char *ivec, const int encryptp)
{
	unsigned char tmp[AES_BLOCK_SIZE];
	unsigned int i;

	if (encryptp) {
		/* Plain CBC for all but the final partial block. */
		while(len > AES_BLOCK_SIZE) {
			for (i = 0; i < AES_BLOCK_SIZE; i++)
				tmp[i] = in[i] ^ ivec[i];
			AES_encrypt(tmp, out, key);
			memcpy(ivec, out, AES_BLOCK_SIZE);
			len -= AES_BLOCK_SIZE;
			in += AES_BLOCK_SIZE;
			out += AES_BLOCK_SIZE;
		}

		/* Final (possibly short) block: zero-pad, encrypt into the
		 * previous output slot, and "steal" the prior ciphertext as
		 * the truncated last block. */
		for (i = 0; i < len; i++)
			tmp[i] = in[i] ^ ivec[i];
		for (; i < AES_BLOCK_SIZE; i++)
			tmp[i] = 0 ^ ivec[i];

		AES_encrypt(tmp, out - AES_BLOCK_SIZE, key);

		memcpy(out, ivec, len);
		memcpy(ivec, out - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
	} else {
		unsigned char tmp2[AES_BLOCK_SIZE];
		unsigned char tmp3[AES_BLOCK_SIZE];

		/* Plain CBC decryption until only the swapped pair remains. */
		while(len > AES_BLOCK_SIZE * 2) {
			memcpy(tmp, in, AES_BLOCK_SIZE);
			AES_decrypt(in, out, key);
			for (i = 0; i < AES_BLOCK_SIZE; i++)
				out[i] ^= ivec[i];
			memcpy(ivec, tmp, AES_BLOCK_SIZE);
			len -= AES_BLOCK_SIZE;
			in += AES_BLOCK_SIZE;
			out += AES_BLOCK_SIZE;
		}

		len -= AES_BLOCK_SIZE; /* length of the short final block */

		memcpy(tmp, in, AES_BLOCK_SIZE); /* save last iv */
		AES_decrypt(in, tmp2, key);

		/* Reassemble the stolen block from the short ciphertext tail
		 * plus the matching bytes of the decrypted block. */
		memcpy(tmp3, in + AES_BLOCK_SIZE, len);
		memcpy(tmp3 + len, tmp2 + len, AES_BLOCK_SIZE - len); /* xor 0 */

		for (i = 0; i < len; i++)
			out[i + AES_BLOCK_SIZE] = tmp2[i] ^ tmp3[i];

		AES_decrypt(tmp3, out, key);
		for (i = 0; i < AES_BLOCK_SIZE; i++)
			out[i] ^= ivec[i];
		memcpy(ivec, tmp, AES_BLOCK_SIZE);
	}
}
// keysize = 32 for 256 bits, 16 for 128 bits
/* DK derivation step: CBC-encrypt (zero IV) the zero-padded 16-byte
 * derivation constant with key_in, producing key_size bytes of derived
 * key. For key_size 32 the second output block chains on the first.
 * NOTE(review): ptext_size is accepted but unused — exactly 16 bytes of
 * ptext are consumed; all current callers pass 16-byte constants. */
static void dk(unsigned char key_out[], unsigned char key_in[],
               size_t key_size, unsigned char ptext[], size_t ptext_size)
{
	unsigned char iv[32];
	unsigned char plaintext[32];
	AES_KEY ekey;

	memset(iv,0,sizeof(iv));
	memset(plaintext,0,sizeof(plaintext));
	memcpy(plaintext,ptext,16);
	AES_set_encrypt_key(key_in,key_size*8,&ekey);
	AES_cbc_encrypt(plaintext,key_out,key_size,&ekey,iv,AES_ENCRYPT);
}
/* Decrypt ctext_size bytes of AES-CTS ciphertext (zero IV) into
 * plaintext using the given key. */
static void krb_decrypt(const unsigned char ciphertext[], size_t ctext_size,
                        unsigned char plaintext[], const unsigned char key[], size_t key_size)
{
	unsigned char iv[32];
	AES_KEY ekey;

	memset(iv,0,sizeof(iv));
	AES_set_decrypt_key(key,key_size*8,&ekey);
	AES_cts_encrypt(ciphertext,plaintext,ctext_size,&ekey,iv,AES_DECRYPT);
}
#if 0 /* This is not used */
/* Encrypt counterpart of krb_decrypt(); kept for reference only.
 * NOTE(review): parameter names look swapped (input is 'ciphertext',
 * output 'plaintext') — verify before enabling. */
static void krb_encrypt(const unsigned char ciphertext[], size_t ctext_size,
                        unsigned char plaintext[], const unsigned char key[], size_t key_size)
{
	unsigned char iv[32];
	AES_KEY ekey;

	memset(iv,0,sizeof(iv));
	AES_set_encrypt_key(key,key_size*8,&ekey);
	AES_cts_encrypt(ciphertext,plaintext,ctext_size,&ekey,iv,AES_ENCRYPT);
}
#endif
/* Main work loop: for each candidate password, derive the Kerberos key
 * (PBKDF2-SHA1 -> DK), decrypt the AS-REQ timestamp, and when the known
 * plaintext pattern matches, compute the HMAC-SHA1 checksum for
 * comparison. Processes keys in groups of MAX_KEYS_PER_CRYPT; groups
 * are distributed across OpenMP threads when enabled. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char tkey[MAX_KEYS_PER_CRYPT][32];
		unsigned char base_key[32];
		unsigned char Ke[32];
		unsigned char plaintext[44];
		int key_size, i;
		int len[MAX_KEYS_PER_CRYPT];
#ifdef SIMD_COEF_32
		/* SIMD path: run the whole key group through PBKDF2 at once. */
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			len[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			pout[i] = tkey[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt,strlen((char*)cur_salt->salt), 4096, pout, 32, 0);
#else
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			len[i] = strlen(saved_key[index+i]);
		}
		pbkdf2_sha1((const unsigned char*)saved_key[index], len[0],
		            cur_salt->salt,strlen((char*)cur_salt->salt),
		            4096, tkey[0], 32, 0);
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			// generate 128 bits from 64 bits of "kerberos" string
			// This is precomputed in init()
			//nfold(8 * 8, (unsigned char*)"kerberos", 128, constant);
			if (cur_salt->etype == 17)
				key_size = 16; /* AES-128 */
			else
				key_size = 32; /* AES-256 */

			dk(base_key, tkey[i], key_size, constant, 32);

			/* The "well-known constant" used for the DK function is the key usage number,
			 * expressed as four octets in big-endian order, followed by one octet indicated below.
			 * Kc = DK(base-key, usage | 0x99);
			 * Ke = DK(base-key, usage | 0xAA);
			 * Ki = DK(base-key, usage | 0x55); */

			// derive Ke for decryption/encryption
			// This is precomputed in init()
			//memset(usage,0,sizeof(usage));
			//usage[3] = 0x01;        // key number in big-endian format
			//usage[4] = 0xAA;        // used to derive Ke
			//nfold(sizeof(usage)*8,usage,sizeof(ke_input)*8,ke_input);
			dk(Ke, base_key, key_size, ke_input, 32);

			// decrypt the AS-REQ timestamp encrypted with 256-bit AES
			// here is enough to check the string, further computation below is required
			// to fully verify the checksum
			krb_decrypt(cur_salt->ct,44,plaintext,Ke, key_size);

			// Check a couple bytes from known plain (YYYYMMDDHHMMSSZ) and
			// bail out if we are out of luck.
			if (plaintext[22] == '2' && plaintext[23] == '0' && plaintext[36] == 'Z') {
				unsigned char Ki[32];
				unsigned char checksum[20];

				// derive Ki used in HMAC-SHA-1 checksum
				// This is precomputed in init()
				//memset(usage,0,sizeof(usage));
				//usage[3] = 0x01;        // key number in big-endian format
				//usage[4] = 0x55;        // used to derive Ki
				//nfold(sizeof(usage)*8,usage,sizeof(ki_input)*8,ki_input);
				dk(Ki,base_key, key_size, ki_input, 32);
				// derive checksum of plaintext
				hmac_sha1(Ki, key_size, plaintext, 44, checksum, 20);
				memcpy(crypt_out[index+i], checksum, BINARY_SIZE);
			} else {
				/* Definite miss: zero so cmp_* cannot false-match. */
				memset(crypt_out[index+i], 0, BINARY_SIZE);
			}
		}
	}
	return count;
}
/* Quick scan over all computed checksums: compare only the first
 * ARCH_SIZE bytes of each; cmp_one() confirms the full width. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (memcmp(binary, crypt_out[i], ARCH_SIZE) == 0)
			return 1;
	}
	return 0;
}
/* Full BINARY_SIZE comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* cmp_one() already compares the full stored checksum, so no further
 * verification is needed here. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Format descriptor: wires the functions above into John's framework. */
struct fmt_main fmt_krb5pa = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
for_simple.c | /* PMSIS includes */
#include "pmsis.h"
#include "omp.h"
#define NB_CORES (8)
/* Per-core iteration counters, filled by the cluster cores. */
static int32_t core_iterations[NB_CORES] = { 0 };
/* Shared error count, updated by cluster_delegate(). */
static uint32_t errors = 0;
/* Cluster main entry, executed by core 0.
 * Forks an OpenMP team of NB_CORES threads, distributes 64 loop
 * iterations over them, counts iterations per core, then verifies
 * (sequentially) that every core ran at least one iteration.
 * Errors are accumulated in the file-scope 'errors' counter. */
void cluster_delegate(void *arg)
{
    printf("Cluster master core entry\n");

    #pragma omp parallel num_threads(NB_CORES)
    {
        printf("[%d %d] Fork entry\n", pi_cluster_id(), omp_get_thread_num() );
        #pragma omp for
        for (int i=0; i<64; i++)
        {
            int32_t core_id = omp_get_thread_num();
            /* Valid thread ids are 0..NB_CORES-1. The original test used
             * '>', which let core_id == NB_CORES fall through and index
             * one past the end of core_iterations[]. */
            if (core_id >= NB_CORES)
            {
                /* 'errors' is shared by the whole team; make the
                 * increment atomic to avoid a lost update. */
                #pragma omp atomic
                errors++;
            }
            else
            {
                /* Each thread owns a distinct core_id slot, so this
                 * increment needs no synchronization. */
                core_iterations[core_id]++;
            }
            printf("[%d %d] for entry index %d\n", pi_cluster_id(), omp_get_thread_num(), i );
        }
    }

    /* Back on core 0 only: check that work was spread to every core. */
    for (int i = 0; i < NB_CORES; i++)
    {
        if (core_iterations[i] == 0)
        {
            errors++;
            printf("Core #%d has no iteration\n", i);
        }
    }
    printf("Cluster master core exit\n");
}
void helloworld(void)
{
printf("Entering main controller\n");
uint32_t errors = 0;
uint32_t core_id = pi_core_id(), cluster_id = pi_cluster_id();
printf("[%d %d] Hello World!\n", cluster_id, core_id);
struct pi_device cluster_dev;
struct pi_cluster_conf cl_conf;
/* Init cluster configuration structure. */
pi_cluster_conf_init(&cl_conf);
cl_conf.id = 0; /* Set cluster ID. */
/* Configure & open cluster. */
pi_open_from_conf(&cluster_dev, &cl_conf);
if (pi_cluster_open(&cluster_dev))
{
printf("Cluster open failed !\n");
pmsis_exit(-1);
}
/* Prepare cluster task and send it to cluster. */
struct pi_cluster_task cl_task;
pi_cluster_task(&cl_task, cluster_delegate, NULL);
pi_cluster_send_task_to_cl(&cluster_dev, &cl_task);
pi_cluster_close(&cluster_dev);
if (errors)
{
printf("Test failed!\n");
}
else
{
printf("Test success!\n");
}
pmsis_exit(errors);
}
/* Program Entry: hand control to the PMSIS runtime, which invokes
 * helloworld() on the fabric controller. */
int main(void)
{
    printf("\n\n\t *** PMSIS HelloWorld ***\n\n");
    return pmsis_kickoff((void *) helloworld);
}
|
transforms_mpfr.c | void ft_mpfr_destroy_plan(mpfr_t * A, int n) {
for (int j = 0; j < n; j++)
for (int i = 0; i < n; i++)
mpfr_clear(A[i+j*n]);
free(A);
}
// Free a triangular banded matrix: n columns of b+1 band entries each.
void ft_mpfr_destroy_triangular_banded(ft_mpfr_triangular_banded * A) {
    const int total = A->n*(A->b+1);
    for (int k = 0; k < total; k++)
        mpfr_clear(A->data[k]);
    free(A->data);
    free(A);
}
// x ← A*x, x ← Aᵀ*x
// A is upper triangular, dense column-major with leading dimension LDA;
// the update is done in place.
void ft_mpfr_trmv(char TRANS, int n, mpfr_t * A, int LDA, mpfr_t * x, mpfr_rnd_t rnd) {
    if (TRANS == 'N') {
        // Accumulate column j into rows i<j first, then scale the
        // diagonal last, so x[j] is still the original input when read.
        for (int j = 0; j < n; j++) {
            for (int i = 0; i < j; i++)
                mpfr_fma(x[i], A[i+j*LDA], x[j], x[i], rnd);
            mpfr_mul(x[j], A[j+j*LDA], x[j], rnd);
        }
    }
    else if (TRANS == 'T') {
        // Work bottom-up: x[j] for j<i are untouched inputs when row i
        // accumulates them.
        for (int i = n-1; i >= 0; i--) {
            mpfr_mul(x[i], A[i+i*LDA], x[i], rnd);
            for (int j = i-1; j >= 0; j--)
                mpfr_fma(x[i], A[j+i*LDA], x[j], x[i], rnd);
        }
    }
}
// x ← A⁻¹*x, x ← A⁻ᵀ*x
// In-place triangular solve; fms followed by neg realizes
// x[i] := x[i] - A[i][j]*x[j], since mpfr_fms(r,a,b,c) gives a*b - c.
void ft_mpfr_trsv(char TRANS, int n, mpfr_t * A, int LDA, mpfr_t * x, mpfr_rnd_t rnd) {
    if (TRANS == 'N') {
        // Back substitution from the last column.
        for (int j = n-1; j >= 0; j--) {
            mpfr_div(x[j], x[j], A[j+j*LDA], rnd);
            for (int i = 0; i < j; i++) {
                mpfr_fms(x[i], A[i+j*LDA], x[j], x[i], rnd);
                mpfr_neg(x[i], x[i], rnd);
            }
        }
    }
    else if (TRANS == 'T') {
        // Forward substitution for the transposed system.
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < i; j++) {
                mpfr_fms(x[i], A[j+i*LDA], x[j], x[i], rnd);
                mpfr_neg(x[i], x[i], rnd);
            }
            mpfr_div(x[i], x[i], A[i+i*LDA], rnd);
        }
    }
}
// B ← A*B, B ← Aᵀ*B — columns are independent, so process them in parallel.
void ft_mpfr_trmm(char TRANS, int n, mpfr_t * A, int LDA, mpfr_t * B, int LDB, int N, mpfr_rnd_t rnd) {
    #pragma omp parallel for
    for (int col = 0; col < N; col++) {
        mpfr_t * x = B + col*LDB;
        ft_mpfr_trmv(TRANS, n, A, LDA, x, rnd);
    }
}
// B ← A⁻¹*B, B ← A⁻ᵀ*B — one independent triangular solve per column.
void ft_mpfr_trsm(char TRANS, int n, mpfr_t * A, int LDA, mpfr_t * B, int LDB, int N, mpfr_rnd_t rnd) {
    #pragma omp parallel for
    for (int col = 0; col < N; col++) {
        mpfr_t * x = B + col*LDB;
        ft_mpfr_trsv(TRANS, n, A, LDA, x, rnd);
    }
}
// x ← A*x, x ← Aᵀ*x
// Variant of ft_mpfr_trmv for a vector supplied as an array of pointers
// to mpfr_t (x[i][0] is element i); same in-place update order.
void ft_mpfr_trmv_ptr(char TRANS, int n, mpfr_t * A, int LDA, mpfr_t ** x, mpfr_rnd_t rnd) {
    if (TRANS == 'N') {
        // Rows i<j first, diagonal last, so x[j] is read before overwrite.
        for (int j = 0; j < n; j++) {
            for (int i = 0; i < j; i++)
                mpfr_fma(x[i][0], A[i+j*LDA], x[j][0], x[i][0], rnd);
            mpfr_mul(x[j][0], A[j+j*LDA], x[j][0], rnd);
        }
    }
    else if (TRANS == 'T') {
        // Bottom-up so lower entries are still the original inputs.
        for (int i = n-1; i >= 0; i--) {
            mpfr_mul(x[i][0], A[i+i*LDA], x[i][0], rnd);
            for (int j = i-1; j >= 0; j--)
                mpfr_fma(x[i][0], A[j+i*LDA], x[j][0], x[i][0], rnd);
        }
    }
}
// x ← A⁻¹*x, x ← A⁻ᵀ*x
// Pointer-array variant of ft_mpfr_trsv; fms+neg realizes
// x[i] := x[i] - A[i][j]*x[j].
void ft_mpfr_trsv_ptr(char TRANS, int n, mpfr_t * A, int LDA, mpfr_t ** x, mpfr_rnd_t rnd) {
    if (TRANS == 'N') {
        // Back substitution from the last column.
        for (int j = n-1; j >= 0; j--) {
            mpfr_div(x[j][0], x[j][0], A[j+j*LDA], rnd);
            for (int i = 0; i < j; i++) {
                mpfr_fms(x[i][0], A[i+j*LDA], x[j][0], x[i][0], rnd);
                mpfr_neg(x[i][0], x[i][0], rnd);
            }
        }
    }
    else if (TRANS == 'T') {
        // Forward substitution for the transposed system.
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < i; j++) {
                mpfr_fms(x[i][0], A[j+i*LDA], x[j][0], x[i][0], rnd);
                mpfr_neg(x[i][0], x[i][0], rnd);
            }
            mpfr_div(x[i][0], x[i][0], A[i+i*LDA], rnd);
        }
    }
}
// B ← A*B, B ← Aᵀ*B — pointer-array columns, processed independently.
void ft_mpfr_trmm_ptr(char TRANS, int n, mpfr_t * A, int LDA, mpfr_t ** B, int LDB, int N, mpfr_rnd_t rnd) {
    #pragma omp parallel for
    for (int col = 0; col < N; col++) {
        mpfr_t ** x = B + col*LDB;
        ft_mpfr_trmv_ptr(TRANS, n, A, LDA, x, rnd);
    }
}
// B ← A⁻¹*B, B ← A⁻ᵀ*B — pointer-array columns, one solve per column.
void ft_mpfr_trsm_ptr(char TRANS, int n, mpfr_t * A, int LDA, mpfr_t ** B, int LDB, int N, mpfr_rnd_t rnd) {
    #pragma omp parallel for
    for (int col = 0; col < N; col++) {
        mpfr_t ** x = B + col*LDB;
        ft_mpfr_trsv_ptr(TRANS, n, A, LDA, x, rnd);
    }
}
/* Allocate an n x n upper-triangular banded matrix with bandwidth b,
 * stored column-major as n columns of b+1 band entries, every entry
 * initialized to +0 at the given precision. Caller frees with
 * ft_mpfr_destroy_triangular_banded(). */
ft_mpfr_triangular_banded * ft_mpfr_calloc_triangular_banded(const int n, const int b, mpfr_prec_t prec) {
    mpfr_t * data = malloc(n*(b+1)*sizeof(mpfr_t));
    for (int j = 0; j < n; j++)
        for (int i = 0; i < b+1; i++) {
            mpfr_init2(data[i+j*(b+1)], prec);
            mpfr_set_zero(data[i+j*(b+1)], 1);
        }
    ft_mpfr_triangular_banded * A = malloc(sizeof(ft_mpfr_triangular_banded));
    A->data = data;
    A->n = n;
    A->b = b;
    return A;
}
/* *v = A[i][j], or +0 when (i,j) lies outside the stored band.
 * Band storage: element (i,j) with j-b <= i <= j lives at offset
 * (i-j+b) + j*(b+1), which simplifies to i + (j+1)*b below. */
void ft_mpfr_get_triangular_banded_index(const ft_mpfr_triangular_banded * A, mpfr_t * v, const int i, const int j, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    int n = A->n, b = A->b;
    if (0 <= i && 0 <= j && 0 <= j-i && j-i <= b && i < n && j < n)
        mpfr_set(* v, A->data[i+(j+1)*b], rnd);
    else
        mpfr_set_zero(* v, 1);
    return;
}
/* A[i][j] = v; silently ignored when (i,j) is outside the stored band.
 * Same band-storage offset as the getter: i + (j+1)*b. */
void ft_mpfr_set_triangular_banded_index(const ft_mpfr_triangular_banded * A, const mpfr_t v, const int i, const int j, mpfr_rnd_t rnd) {
    int n = A->n, b = A->b;
    if (0 <= i && 0 <= j && 0 <= j-i && j-i <= b && i < n && j < n)
        mpfr_set(A->data[i+(j+1)*b], v, rnd);
}
// Generalized eigenvalues of the triangular pencil (A, B):
// lambda[j] = A[j][j] / B[j][j].
void ft_mpfr_triangular_banded_eigenvalues(ft_mpfr_triangular_banded * A, ft_mpfr_triangular_banded * B, mpfr_t * lambda, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    mpfr_t a_jj, b_jj;
    mpfr_init2(a_jj, prec);
    mpfr_init2(b_jj, prec);
    for (int j = 0; j < A->n; j++) {
        ft_mpfr_get_triangular_banded_index(A, &a_jj, j, j, prec, rnd);
        ft_mpfr_get_triangular_banded_index(B, &b_jj, j, j, prec, rnd);
        mpfr_div(lambda[j], a_jj, b_jj, rnd);
    }
    mpfr_clear(a_jj);
    mpfr_clear(b_jj);
}
// Assumes eigenvectors are initialized by V[i,j] = 0 for i > j and V[j,j] ≠ 0.
// Computes the eigenvectors of the triangular banded pencil (A, B) by back
// substitution up each column j: with lam the j-th generalized eigenvalue,
// V[i][j] = -t / (lam*B[i][i] - A[i][i]) where
// t = sum over the band of (lam*B[i][k] - A[i][k]) * V[k][j].
// Fix over the original: t and lam were mpfr_init2'd but never cleared,
// leaking two MPFR values per call.
void ft_mpfr_triangular_banded_eigenvectors(ft_mpfr_triangular_banded * A, ft_mpfr_triangular_banded * B, mpfr_t * V, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    int n = A->n, b1 = A->b, b2 = B->b;
    int b = MAX(b1, b2); // widest band of the two matrices
    mpfr_t t, t1, t2, t3, t4, lam;
    mpfr_init2(t, prec);
    mpfr_init2(t1, prec);
    mpfr_init2(t2, prec);
    mpfr_init2(t3, prec);
    mpfr_init2(t4, prec);
    mpfr_init2(lam, prec);
    for (int j = 1; j < n; j++) {
        //lam = X(get_triangular_banded_index)(A, j, j)/X(get_triangular_banded_index)(B, j, j);
        ft_mpfr_get_triangular_banded_index(A, &t1, j, j, prec, rnd);
        ft_mpfr_get_triangular_banded_index(B, &t2, j, j, prec, rnd);
        mpfr_div(lam, t1, t2, rnd);
        for (int i = j-1; i >= 0; i--) {
            //t = 0;
            mpfr_set_zero(t, 1);
            for (int k = i+1; k < MIN(i+b+1, n); k++) {
                //t += (lam*X(get_triangular_banded_index)(B, i, k) - X(get_triangular_banded_index)(A, i, k))*V[k+j*n];
                mpfr_set(t3, V[k+j*n], rnd);
                ft_mpfr_get_triangular_banded_index(A, &t1, i, k, prec, rnd);
                ft_mpfr_get_triangular_banded_index(B, &t2, i, k, prec, rnd);
                mpfr_fms(t4, lam, t2, t1, rnd);
                mpfr_fma(t, t4, t3, t, rnd);
            }
            //V[i+j*n] = -t/(lam*X(get_triangular_banded_index)(B, i, i) - X(get_triangular_banded_index)(A, i, i));
            ft_mpfr_get_triangular_banded_index(A, &t1, i, i, prec, rnd);
            ft_mpfr_get_triangular_banded_index(B, &t2, i, i, prec, rnd);
            mpfr_fms(t3, lam, t2, t1, rnd);
            mpfr_div(t4, t, t3, rnd);
            mpfr_neg(V[i+j*n], t4, rnd);
        }
    }
    mpfr_clear(t);
    mpfr_clear(t1);
    mpfr_clear(t2);
    mpfr_clear(t3);
    mpfr_clear(t4);
    mpfr_clear(lam);
}
/* Build the banded matrix A of the pencil used by the Legendre-to-
 * Chebyshev connection: diagonal entries i*(i+1) and superdiagonal
 * (offset 2) entries -i*(i-1). */
static inline ft_mpfr_triangular_banded * ft_mpfr_create_A_legendre_to_chebyshev(const int n, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * A = ft_mpfr_calloc_triangular_banded(n, 2, prec);
    mpfr_t v;
    mpfr_init2(v, prec);
    if (n > 1) {
        mpfr_set_d(v, 2.0, rnd);
        ft_mpfr_set_triangular_banded_index(A, v, 1, 1, rnd);
    }
    for (int i = 2; i < n; i++) {
        mpfr_set_d(v, -i*(i-1.0), rnd);
        ft_mpfr_set_triangular_banded_index(A, v, i-2, i, rnd);
        mpfr_set_d(v, i*(i+1.0), rnd);
        ft_mpfr_set_triangular_banded_index(A, v, i, i, rnd);
    }
    mpfr_clear(v);
    return A;
}
/* Build the banded matrix B of the Legendre-to-Chebyshev pencil:
 * diagonal 2, 1, 1, ... and offset-2 superdiagonal entries -1. */
static inline ft_mpfr_triangular_banded * ft_mpfr_create_B_legendre_to_chebyshev(const int n, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * B = ft_mpfr_calloc_triangular_banded(n, 2, prec);
    mpfr_t v;
    mpfr_init2(v, prec);
    if (n > 0) {
        mpfr_set_d(v, 2.0, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, 0, 0, rnd);
    }
    if (n > 1) {
        mpfr_set_d(v, 1.0, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, 1, 1, rnd);
    }
    for (int i = 2; i < n; i++) {
        mpfr_set_d(v, -1.0, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, i-2, i, rnd);
        mpfr_set_d(v, 1.0, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, i, i, rnd);
    }
    mpfr_clear(v);
    return B;
}
/* Build the n x n Legendre-to-Chebyshev connection matrix:
 * eigenvectors of the (A, B) pencil, rescaled row-wise and column-wise.
 * normleg/normcheb select orthonormal rather than classical
 * normalization on the respective side. Returns a malloc'd array of
 * n*n initialized mpfr_t; caller frees with ft_mpfr_destroy_plan(). */
mpfr_t * ft_mpfr_plan_legendre_to_chebyshev(const int normleg, const int normcheb, const int n, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * A = ft_mpfr_create_A_legendre_to_chebyshev(n, prec, rnd);
    ft_mpfr_triangular_banded * B = ft_mpfr_create_B_legendre_to_chebyshev(n, prec, rnd);
    /* V starts as the identity; the eigenvector routine fills the
     * strictly upper part in place. */
    mpfr_t * V = malloc(n*n*sizeof(mpfr_t));
    for (int j = 0; j < n; j++) {
        for (int i = 0; i < n; i++) {
            mpfr_init2(V[i+j*n], prec);
            mpfr_set_zero(V[i+j*n], 1);
        }
        mpfr_set_d(V[j+j*n], 1.0, rnd);
    }
    ft_mpfr_triangular_banded_eigenvectors(A, B, V, prec, rnd);
    mpfr_t * sclrow = malloc(n*sizeof(mpfr_t));
    mpfr_t * sclcol = malloc(n*sizeof(mpfr_t));
    mpfr_t t, t1, sqrtpi, sqrtpi2;
    mpfr_init2(t, prec);
    mpfr_init2(t1, prec);
    mpfr_set_d(t, 1.0, rnd);
    mpfr_t half;
    mpfr_init2(half, prec);
    mpfr_set_d(half, 0.5, rnd);
    /* sqrt(pi) via Gamma(1/2); sqrtpi2 = sqrt(pi/2). */
    mpfr_init2(sqrtpi, prec);
    mpfr_gamma(sqrtpi, half, rnd);
    mpfr_t sqrthalf;
    mpfr_init2(sqrthalf, prec);
    mpfr_sqrt(sqrthalf, half, rnd);
    mpfr_init2(sqrtpi2, prec);
    mpfr_mul(sqrtpi2, sqrtpi, sqrthalf, rnd);
    if (n > 0) {
        //sclrow[0] = normcheb ? sqrtpi : 1;
        mpfr_init2(sclrow[0], prec);
        normcheb ? mpfr_set(sclrow[0], sqrtpi, rnd) : mpfr_set_d(sclrow[0], 1.0, rnd);
        //sclcol[0] = normleg ? Y2(sqrt)(0.5) : 1;
        mpfr_init2(sclcol[0], prec);
        normleg ? mpfr_set(sclcol[0], sqrthalf, rnd) : mpfr_set_d(sclcol[0], 1.0, rnd);
    }
    if (n > 1) {
        //sclrow[1] = normcheb ? sqrtpi2 : 1;
        mpfr_init2(sclrow[1], prec);
        normcheb ? mpfr_set(sclrow[1], sqrtpi2, rnd) : mpfr_set_d(sclrow[1], 1.0, rnd);
        //sclcol[1] = normleg ? Y2(sqrt)(1.5) : 1;
        mpfr_init2(sclcol[1], prec);
        mpfr_set_d(t1, 1.5, rnd);
        normleg ? mpfr_sqrt(sclcol[1], t1, rnd) : mpfr_set_d(sclcol[1], 1.0, rnd);
    }
    mpfr_t num, den, rat;
    mpfr_init2(num, prec);
    mpfr_init2(den, prec);
    mpfr_init2(rat, prec);
    for (int i = 2; i < n; i++) {
        /* t accumulates the product of (2i-1)/(2i). */
        //t *= (2*i-ONE(FLT2))/(2*i);
        mpfr_set_d(num, 2*i-1, rnd);
        mpfr_set_d(den, 2*i, rnd);
        mpfr_div(rat, num, den, rnd);
        mpfr_mul(t, rat, t, rnd);
        //sclrow[i] = normcheb ? sqrtpi2 : 1;
        mpfr_init2(sclrow[i], prec);
        normcheb ? mpfr_set(sclrow[i], sqrtpi2, rnd) : mpfr_set_d(sclrow[i], 1.0, rnd);
        //sclcol[i] = (normleg ? Y2(sqrt)(i+0.5) : 1)*t;
        mpfr_init2(sclcol[i], prec);
        mpfr_set_d(t1, i+0.5, rnd);
        normleg ? mpfr_sqrt(sclcol[i], t1, rnd) : mpfr_set_d(sclcol[i], 1.0, rnd);
        mpfr_mul(sclcol[i], t, sclcol[i], rnd);
    }
    /* Only entries with i and j of equal parity are nonzero, hence the
     * stride-2 inner loop. */
    for (int j = 0; j < n; j++)
        for (int i = j; i >= 0; i -= 2) {
            //V[i+j*n] = sclrow[i]*Vl[i+j*n]*sclcol[j];
            mpfr_mul(V[i+j*n], sclrow[i], V[i+j*n], rnd);
            mpfr_mul(V[i+j*n], V[i+j*n], sclcol[j], rnd);
        }
    ft_mpfr_destroy_triangular_banded(A);
    ft_mpfr_destroy_triangular_banded(B);
    for (int i = 0; i < n; i++) {
        mpfr_clear(sclrow[i]);
        mpfr_clear(sclcol[i]);
    }
    free(sclrow);
    free(sclcol);
    mpfr_clear(t);
    mpfr_clear(t1);
    mpfr_clear(sqrtpi);
    mpfr_clear(sqrtpi2);
    mpfr_clear(half);
    mpfr_clear(sqrthalf);
    mpfr_clear(num);
    mpfr_clear(den);
    mpfr_clear(rat);
    return V;
}
/* Build the banded matrix A of the Chebyshev-to-Legendre pencil:
 * diagonal entries i*i/(2i+1) and offset-2 superdiagonal entries
 * -(i+1)^2/(2i+1). */
static inline ft_mpfr_triangular_banded * ft_mpfr_create_A_chebyshev_to_legendre(const int n, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * A = ft_mpfr_calloc_triangular_banded(n, 2, prec);
    mpfr_t v, w, x;
    mpfr_init2(v, prec);
    mpfr_init2(w, prec);
    mpfr_init2(x, prec);
    if (n > 1) {
        /* A[1][1] = 1/3, formed by exact division at working precision. */
        mpfr_set_d(w, 1, rnd);
        mpfr_set_d(x, 3, rnd);
        mpfr_div(v, w, x, rnd);
        ft_mpfr_set_triangular_banded_index(A, v, 1, 1, rnd);
    }
    for (int i = 2; i < n; i++) {
        mpfr_set_d(w, -(i+1.0)*(i+1.0), rnd);
        mpfr_set_d(x, 2*i+1, rnd);
        mpfr_div(v, w, x, rnd);
        ft_mpfr_set_triangular_banded_index(A, v, i-2, i, rnd);
        mpfr_set_d(w, 1.0*i*i, rnd);
        mpfr_div(v, w, x, rnd);
        ft_mpfr_set_triangular_banded_index(A, v, i, i, rnd);
    }
    mpfr_clear(v);
    mpfr_clear(w);
    mpfr_clear(x);
    return A;
}
/* Build the banded matrix B of the Chebyshev-to-Legendre pencil:
 * diagonal 1, 1/3, then 1/(2i+1); offset-2 superdiagonal -1/(2i+1). */
static inline ft_mpfr_triangular_banded * ft_mpfr_create_B_chebyshev_to_legendre(const int n, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * B = ft_mpfr_calloc_triangular_banded(n, 2, prec);
    mpfr_t v, w, x;
    mpfr_init2(v, prec);
    mpfr_init2(w, prec);
    mpfr_init2(x, prec);
    if (n > 0) {
        mpfr_set_d(v, 1, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, 0, 0, rnd);
    }
    if (n > 1) {
        mpfr_set_d(w, 1, rnd);
        mpfr_set_d(x, 3, rnd);
        mpfr_div(v, w, x, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, 1, 1, rnd);
    }
    for (int i = 2; i < n; i++) {
        mpfr_set_d(w, -1, rnd);
        mpfr_set_d(x, 2*i+1, rnd);
        mpfr_div(v, w, x, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, i-2, i, rnd);
        mpfr_set_d(w, 1, rnd);
        mpfr_div(v, w, x, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, i, i, rnd);
    }
    mpfr_clear(v);
    mpfr_clear(w);
    mpfr_clear(x);
    return B;
}
/* Build the n x n Chebyshev-to-Legendre connection matrix, mirroring
 * ft_mpfr_plan_legendre_to_chebyshev with the reciprocal scalings.
 * Returns a malloc'd array of n*n initialized mpfr_t; caller frees
 * with ft_mpfr_destroy_plan(). */
mpfr_t * ft_mpfr_plan_chebyshev_to_legendre(const int normcheb, const int normleg, const int n, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * A = ft_mpfr_create_A_chebyshev_to_legendre(n, prec, rnd);
    ft_mpfr_triangular_banded * B = ft_mpfr_create_B_chebyshev_to_legendre(n, prec, rnd);
    /* V starts as the identity; eigenvectors fill the upper triangle. */
    mpfr_t * V = malloc(n*n*sizeof(mpfr_t));
    for (int j = 0; j < n; j++) {
        for (int i = 0; i < n; i++) {
            mpfr_init2(V[i+j*n], prec);
            mpfr_set_zero(V[i+j*n], 1);
        }
        mpfr_set_d(V[j+j*n], 1.0, rnd);
    }
    ft_mpfr_triangular_banded_eigenvectors(A, B, V, prec, rnd);
    mpfr_t * sclrow = malloc(n*sizeof(mpfr_t));
    mpfr_t * sclcol = malloc(n*sizeof(mpfr_t));
    mpfr_t t, t1, sqrtpi, sqrt_1_pi, sqrt_2_pi;
    mpfr_init2(t, prec);
    mpfr_init2(t1, prec);
    mpfr_set_d(t, 1.0, rnd);
    mpfr_t half;
    mpfr_init2(half, prec);
    mpfr_set_d(half, 0.5, rnd);
    /* sqrt(pi) via Gamma(1/2); then 1/sqrt(pi) and sqrt(2/pi). */
    mpfr_init2(sqrtpi, prec);
    mpfr_gamma(sqrtpi, half, rnd);
    mpfr_init2(sqrt_1_pi, prec);
    mpfr_div(sqrt_1_pi, t, sqrtpi, rnd);
    mpfr_t sqrt2;
    mpfr_init2(sqrt2, prec);
    mpfr_sqrt_ui(sqrt2, 2, rnd);
    mpfr_init2(sqrt_2_pi, prec);
    mpfr_mul(sqrt_2_pi, sqrt_1_pi, sqrt2, rnd);
    if (n > 0) {
        //sclrow[0] = normleg ? 1/Y2(sqrt)(0.5) : 1;  (== sqrt(2))
        mpfr_init2(sclrow[0], prec);
        normleg ? mpfr_set(sclrow[0], sqrt2, rnd) : mpfr_set_d(sclrow[0], 1.0, rnd);
        //sclcol[0] = normcheb ? sqrt_1_pi : 1;
        mpfr_init2(sclcol[0], prec);
        normcheb ? mpfr_set(sclcol[0], sqrt_1_pi, rnd) : mpfr_set_d(sclcol[0], 1.0, rnd);
    }
    if (n > 1) {
        //sclrow[1] = normleg ? 1/Y2(sqrt)(1.5) : 1;
        mpfr_init2(sclrow[1], prec);
        mpfr_set_d(t1, 1.5, rnd);
        normleg ? mpfr_rec_sqrt(sclrow[1], t1, rnd) : mpfr_set_d(sclrow[1], 1.0, rnd);
        //sclcol[1] = normcheb ? sqrt_2_pi : 1;
        mpfr_init2(sclcol[1], prec);
        normcheb ? mpfr_set(sclcol[1], sqrt_2_pi, rnd) : mpfr_set_d(sclcol[1], 1.0, rnd);
    }
    mpfr_t num, den, rat;
    mpfr_init2(num, prec);
    mpfr_init2(den, prec);
    mpfr_init2(rat, prec);
    for (int i = 2; i < n; i++) {
        /* t accumulates the product of (2i)/(2i-1). */
        //t *= (2*i)/(2*i-ONE(FLT2));
        mpfr_set_d(num, 2*i, rnd);
        mpfr_set_d(den, 2*i-1, rnd);
        mpfr_div(rat, num, den, rnd);
        mpfr_mul(t, rat, t, rnd);
        //sclrow[i] = normleg ? 1/Y2(sqrt)(i+0.5) : 1;
        mpfr_init2(sclrow[i], prec);
        mpfr_set_d(t1, i+0.5, rnd);
        normleg ? mpfr_rec_sqrt(sclrow[i], t1, rnd) : mpfr_set_d(sclrow[i], 1.0, rnd);
        //sclcol[i] = (normcheb ? sqrt_2_pi : 1)*t;
        mpfr_init2(sclcol[i], prec);
        normcheb ? mpfr_set(sclcol[i], sqrt_2_pi, rnd) : mpfr_set_d(sclcol[i], 1.0, rnd);
        mpfr_mul(sclcol[i], t, sclcol[i], rnd);
    }
    /* Only same-parity (i, j) entries are nonzero: stride-2 inner loop. */
    for (int j = 0; j < n; j++)
        for (int i = j; i >= 0; i -= 2) {
            //V[i+j*n] = sclrow[i]*Vl[i+j*n]*sclcol[j];
            mpfr_mul(V[i+j*n], sclrow[i], V[i+j*n], rnd);
            mpfr_mul(V[i+j*n], V[i+j*n], sclcol[j], rnd);
        }
    ft_mpfr_destroy_triangular_banded(A);
    ft_mpfr_destroy_triangular_banded(B);
    for (int i = 0; i < n; i++) {
        mpfr_clear(sclrow[i]);
        mpfr_clear(sclcol[i]);
    }
    free(sclrow);
    free(sclcol);
    mpfr_clear(t);
    mpfr_clear(t1);
    mpfr_clear(sqrtpi);
    mpfr_clear(sqrt_1_pi);
    mpfr_clear(sqrt_2_pi);
    mpfr_clear(half);
    mpfr_clear(sqrt2);
    mpfr_clear(num);
    mpfr_clear(den);
    mpfr_clear(rat);
    return V;
}
// Assemble the banded matrix A (bandwidth 2) for the generalized
// eigenproblem defining the ultraspherical(lambda) -> ultraspherical(mu)
// connection coefficients.  t1..t8 are scratch registers; the commented
// formulas give the closed-form entries being evaluated.
static inline ft_mpfr_triangular_banded * ft_mpfr_create_A_ultraspherical_to_ultraspherical(const int n, mpfr_srcptr lambda, mpfr_srcptr mu, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * A = ft_mpfr_calloc_triangular_banded(n, 2, prec);
    mpfr_t v, t1, t2, t3, t4, t5, t6, t7, t8;
    mpfr_init2(v, prec);
    mpfr_init2(t1, prec);
    mpfr_init2(t2, prec);
    mpfr_init2(t3, prec);
    mpfr_init2(t4, prec);
    mpfr_init2(t5, prec);
    mpfr_init2(t6, prec);
    mpfr_init2(t7, prec);
    mpfr_init2(t8, prec);
    if (n > 1) {
        // v = (1+2*lambda)*mu/(1+mu);
        mpfr_mul_d(t1, lambda, 2, rnd);
        mpfr_add_d(t2, t1, 1, rnd);
        mpfr_mul(t3, t2, mu, rnd);
        mpfr_add_d(t4, mu, 1, rnd);
        mpfr_div(v, t3, t4, rnd);
        ft_mpfr_set_triangular_banded_index(A, v, 1, 1, rnd);
    }
    for (int i = 2; i < n; i++) {
        // v = -(i+2*mu)*(i+2*(mu-lambda))*mu/(i+mu);
        mpfr_mul_d(t1, mu, 2, rnd);
        mpfr_add_d(t2, t1, i, rnd);
        mpfr_sub(t3, mu, lambda, rnd);
        mpfr_mul_d(t4, t3, 2, rnd);
        mpfr_add_d(t5, t4, i, rnd);
        mpfr_mul(t6, t2, t5, rnd);
        // t7 = mu/(i+mu);  (computed once, reused for the diagonal below)
        mpfr_add_d(t7, mu, i, rnd);
        mpfr_div(t7, mu, t7, rnd);
        mpfr_mul(t8, t6, t7, rnd);
        mpfr_neg(v, t8, rnd);
        ft_mpfr_set_triangular_banded_index(A, v, i-2, i, rnd);
        // v = i*(i+2*lambda)*mu/(i+mu);
        mpfr_mul_d(t1, lambda, 2, rnd);
        mpfr_add_d(t2, t1, i, rnd);
        mpfr_mul_d(t3, t2, i, rnd);
        mpfr_mul(v, t3, t7, rnd);
        ft_mpfr_set_triangular_banded_index(A, v, i, i, rnd);
    }
    mpfr_clear(v);
    mpfr_clear(t1);
    mpfr_clear(t2);
    mpfr_clear(t3);
    mpfr_clear(t4);
    mpfr_clear(t5);
    mpfr_clear(t6);
    mpfr_clear(t7);
    mpfr_clear(t8);
    return A;
}
// Assemble the banded matrix B (bandwidth 2) for the ultraspherical
// connection problem: B[0][0] = 1 and, for i >= 1, the diagonal carries
// mu/(i+mu) while the second superdiagonal carries -mu/(i+mu).
static inline ft_mpfr_triangular_banded * ft_mpfr_create_B_ultraspherical_to_ultraspherical(const int n, mpfr_srcptr mu, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * B = ft_mpfr_calloc_triangular_banded(n, 2, prec);
    mpfr_t ratio, denom, negratio;
    mpfr_init2(ratio, prec);
    mpfr_init2(denom, prec);
    mpfr_init2(negratio, prec);
    if (n > 0) {
        // B[0][0] = 1.
        mpfr_set_d(ratio, 1, rnd);
        ft_mpfr_set_triangular_banded_index(B, ratio, 0, 0, rnd);
    }
    if (n > 1) {
        // B[1][1] = mu/(1+mu).
        mpfr_add_d(denom, mu, 1, rnd);
        mpfr_div(ratio, mu, denom, rnd);
        ft_mpfr_set_triangular_banded_index(B, ratio, 1, 1, rnd);
    }
    for (int i = 2; i < n; i++) {
        // ratio = mu/(i+mu) goes on the diagonal; its exact negation goes
        // two entries above in the same column.
        mpfr_add_d(denom, mu, i, rnd);
        mpfr_div(ratio, mu, denom, rnd);
        ft_mpfr_set_triangular_banded_index(B, ratio, i, i, rnd);
        mpfr_neg(negratio, ratio, rnd);
        ft_mpfr_set_triangular_banded_index(B, negratio, i-2, i, rnd);
    }
    mpfr_clear(ratio);
    mpfr_clear(denom);
    mpfr_clear(negratio);
    return B;
}
// Builds the dense n x n upper-triangular matrix V mapping ultraspherical
// expansion coefficients with parameter lambda to coefficients with
// parameter mu.  norm1/norm2 request orthonormalized source/target bases.
// Returns a malloc'd column-major array (V[i+j*n]); the caller must
// mpfr_clear each entry and free the array.
mpfr_t * ft_mpfr_plan_ultraspherical_to_ultraspherical(const int norm1, const int norm2, const int n, mpfr_srcptr lambda, mpfr_srcptr mu, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * A = ft_mpfr_create_A_ultraspherical_to_ultraspherical(n, lambda, mu, prec, rnd);
    ft_mpfr_triangular_banded * B = ft_mpfr_create_B_ultraspherical_to_ultraspherical(n, mu, prec, rnd);
    // V starts as the identity; the generalized eigenproblem fills in the
    // upper triangle.
    mpfr_t * V = malloc(n*n*sizeof(mpfr_t));
    for (int j = 0; j < n; j++) {
        for (int i = 0; i < n; i++) {
            mpfr_init2(V[i+j*n], prec);
            mpfr_set_zero(V[i+j*n], 1);
        }
        mpfr_set_d(V[j+j*n], 1.0, rnd);
    }
    ft_mpfr_triangular_banded_eigenvectors(A, B, V, prec, rnd);
    // Row/column scalings; the commented formulas show the recurrences being
    // evaluated with scratch registers t1..t7.
    mpfr_t * sclrow = malloc(n*sizeof(mpfr_t));
    mpfr_t * sclcol = malloc(n*sizeof(mpfr_t));
    mpfr_t t1, t2, t3, t4, t5, t6, t7;
    mpfr_init2(t1, prec);
    mpfr_init2(t2, prec);
    mpfr_init2(t3, prec);
    mpfr_init2(t4, prec);
    mpfr_init2(t5, prec);
    mpfr_init2(t6, prec);
    mpfr_init2(t7, prec);
    if (n > 0) {
        //sclrow[0] = norm2 ? Y2(sqrt)(Y2(tgamma)(0.5)*Y2(tgamma)(mu2+0.5)/Y2(tgamma)(mu2+1)) : 1;
        mpfr_set_d(t1, 0.5, rnd);
        mpfr_gamma(t2, t1, rnd);
        mpfr_add_d(t3, mu, 0.5, rnd);
        mpfr_gamma(t4, t3, rnd);
        mpfr_add_d(t5, mu, 1.0, rnd);
        mpfr_gamma(t6, t5, rnd);
        mpfr_mul(t7, t2, t4, rnd);
        mpfr_div(t7, t7, t6, rnd);
        mpfr_sqrt(t7, t7, rnd);
        mpfr_init2(sclrow[0], prec);
        norm2 ? mpfr_set(sclrow[0], t7, rnd) : mpfr_set_d(sclrow[0], 1.0, rnd);
        //sclcol[0] = norm1 ? Y2(sqrt)(Y2(tgamma)(lambda2+1)/(Y2(tgamma)(0.5)*Y2(tgamma)(lambda2+0.5))) : 1;
        // t2 still holds Gamma(1/2) from above.
        mpfr_add_d(t3, lambda, 1.0, rnd);
        mpfr_gamma(t4, t3, rnd);
        mpfr_add_d(t5, lambda, 0.5, rnd);
        mpfr_gamma(t6, t5, rnd);
        mpfr_mul(t7, t2, t6, rnd);
        mpfr_div(t7, t4, t7, rnd);
        mpfr_sqrt(t7, t7, rnd);
        mpfr_init2(sclcol[0], prec);
        norm1 ? mpfr_set(sclcol[0], t7, rnd) : mpfr_set_d(sclcol[0], 1.0, rnd);
    }
    for (int i = 1; i < n; i++) {
        //sclrow[i] = norm2 ? Y2(sqrt)((i-1+mu2)/i*(i-1+2*mu2)/(i+mu2))*sclrow[i-1] : 1;
        mpfr_add_d(t1, mu, i-1, rnd);
        mpfr_div_d(t2, t1, i, rnd);
        mpfr_add(t3, t1, mu, rnd);
        mpfr_add_d(t4, t1, 1, rnd);
        mpfr_div(t5, t3, t4, rnd);
        mpfr_mul(t6, t2, t5, rnd);
        mpfr_sqrt(t6, t6, rnd);
        mpfr_mul(t7, t6, sclrow[i-1], rnd);
        mpfr_init2(sclrow[i], prec);
        norm2 ? mpfr_set(sclrow[i], t7, rnd) : mpfr_set_d(sclrow[i], 1.0, rnd);
        //sclcol[i] = norm1 ? Y2(sqrt)(i/(i-1+lambda2)*(i+lambda2)/(i-1+2*lambda2))*(i-1+lambda2)/(i-1+mu2)*sclcol[i-1] : (i-1+lambda2)/(i-1+mu2)*sclcol[i-1];
        mpfr_add_d(t1, lambda, i-1, rnd);
        mpfr_d_div(t2, i, t1, rnd);
        mpfr_add_d(t3, t1, 1, rnd);
        mpfr_add(t4, t1, lambda, rnd);
        mpfr_div(t5, t3, t4, rnd);
        mpfr_mul(t6, t2, t5, rnd);
        mpfr_sqrt(t7, t6, rnd);
        // t1 = (i-1+lambda)
        mpfr_add_d(t2, mu, i-1, rnd);
        mpfr_div(t3, t1, t2, rnd);
        mpfr_mul(t4, t3, sclcol[i-1], rnd);
        mpfr_init2(sclcol[i], prec);
        norm1 ? mpfr_mul(sclcol[i], t7, t4, rnd) : mpfr_set(sclcol[i], t4, rnd);
    }
    // Apply both scalings; row stride 2 because only entries of matching
    // parity are nonzero for ultraspherical connections.
    for (int j = 0; j < n; j++)
        for (int i = j; i >= 0; i -= 2) {
            //V[i+j*n] = sclrow[i]*Vl[i+j*n]*sclcol[j];
            mpfr_mul(V[i+j*n], sclrow[i], V[i+j*n], rnd);
            mpfr_mul(V[i+j*n], V[i+j*n], sclcol[j], rnd);
        }
    ft_mpfr_destroy_triangular_banded(A);
    ft_mpfr_destroy_triangular_banded(B);
    for (int i = 0; i < n; i++) {
        mpfr_clear(sclrow[i]);
        mpfr_clear(sclcol[i]);
    }
    free(sclrow);
    free(sclcol);
    mpfr_clear(t1);
    mpfr_clear(t2);
    mpfr_clear(t3);
    mpfr_clear(t4);
    mpfr_clear(t5);
    mpfr_clear(t6);
    mpfr_clear(t7);
    return V;
}
// Assemble the banded matrix A (bandwidth 2) for the generalized
// eigenproblem defining the Jacobi(alpha,beta) -> Jacobi(gamma,delta)
// connection coefficients.  t1..t15 are scratch registers; the commented
// closed forms give each entry.
static inline ft_mpfr_triangular_banded * ft_mpfr_create_A_jacobi_to_jacobi(const int n, mpfr_srcptr alpha, mpfr_srcptr beta, mpfr_srcptr gamma, mpfr_srcptr delta, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * A = ft_mpfr_calloc_triangular_banded(n, 2, prec);
    mpfr_t v, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
    mpfr_init2(v, prec);
    mpfr_init2(t1, prec);
    mpfr_init2(t2, prec);
    mpfr_init2(t3, prec);
    mpfr_init2(t4, prec);
    mpfr_init2(t5, prec);
    mpfr_init2(t6, prec);
    mpfr_init2(t7, prec);
    mpfr_init2(t8, prec);
    mpfr_init2(t9, prec);
    mpfr_init2(t10, prec);
    mpfr_init2(t11, prec);
    mpfr_init2(t12, prec);
    mpfr_init2(t13, prec);
    mpfr_init2(t14, prec);
    mpfr_init2(t15, prec);
    if (n > 1) {
        // Scratch after the block below: t1 = (gamma-alpha+delta-beta)/2,
        // t2 = (gamma-alpha-(alpha-beta)... i.e. ((gamma-delta)-(alpha-beta))/2,
        // t3 = gamma+delta+2, t4 = gamma-delta, t5 = gamma+delta+4.
        mpfr_add(t1, alpha, beta, rnd);
        mpfr_sub(t2, alpha, beta, rnd);
        mpfr_add(t3, gamma, delta, rnd);
        mpfr_sub(t4, gamma, delta, rnd);
        mpfr_sub(t1, t3, t1, rnd);
        mpfr_div_d(t1, t1, 2, rnd);
        mpfr_sub(t2, t4, t2, rnd);
        mpfr_div_d(t2, t2, 2, rnd);
        mpfr_add_d(t3, t3, 2, rnd);
        mpfr_add_d(t5, t3, 2, rnd);
        // v = (gamma-delta)*(gamma+delta+2)/(gamma+delta+4)*(1+(gamma-alpha+delta-beta)/2) - (gamma+delta+2)*(gamma-alpha+beta-delta)/2;
        mpfr_add_d(v, t1, 1, rnd);
        mpfr_mul(v, v, t3, rnd);
        mpfr_div(v, v, t5, rnd);
        mpfr_mul(v, v, t4, rnd);
        mpfr_mul(t2, t2, t3, rnd);
        mpfr_sub(v, v, t2, rnd);
        ft_mpfr_set_triangular_banded_index(A, v, 0, 1, rnd);
        // v = (alpha+beta+2)*(gamma+delta+2)/(gamma+delta+4);
        mpfr_add(v, alpha, beta, rnd);
        mpfr_add_d(v, v, 2, rnd);
        mpfr_mul(v, v, t3, rnd);
        mpfr_div(v, v, t5, rnd);
        ft_mpfr_set_triangular_banded_index(A, v, 1, 1, rnd);
    }
    for (int i = 2; i < n; i++) {
        // Scratch map for this iteration:
        //   t1 = i+gamma,  t2 = i+delta,  t3 = 2i+gamma+delta,
        //   t4 = t3+1,     t5 = t3+2,     t6 = alpha+beta,  t7 = alpha-beta,
        //   t9 = gamma-delta,  t10 = i+gamma+delta+1,  t11 = t10+1,
        //   t12 = i+alpha+beta+1,  t14 = i+gamma-alpha+delta-beta,
        //   t8  -> (gamma+delta+2)*(gamma-alpha+delta-beta)/2,
        //   t15 -> (i+gamma+delta+1)*(gamma-alpha+beta-delta)/2.
        mpfr_add_d(t1, gamma, i, rnd);
        mpfr_add_d(t2, delta, i, rnd);
        mpfr_add(t3, t1, t2, rnd);
        mpfr_add_d(t4, t3, 1, rnd);
        mpfr_add_d(t5, t4, 1, rnd);
        mpfr_add(t6, alpha, beta, rnd);
        mpfr_sub(t7, alpha, beta, rnd);
        mpfr_add(t8, gamma, delta, rnd);
        mpfr_sub(t9, gamma, delta, rnd);
        mpfr_add_d(t10, t8, i+1, rnd);
        mpfr_add_d(t11, t10, 1, rnd);
        mpfr_add_d(t12, t6, i+1, rnd);
        mpfr_sub(t13, t8, t6, rnd);
        mpfr_add_d(t14, t13, i, rnd);
        mpfr_div_d(t13, t13, 2, rnd);
        mpfr_add_d(t8, t8, 2, rnd);
        mpfr_mul(t8, t8, t13, rnd);
        mpfr_sub(t15, t9, t7, rnd);
        mpfr_div_d(t15, t15, 2, rnd);
        mpfr_mul(t15, t10, t15, rnd);
        // v = -(i+gamma+delta+1)*(i+gamma)/(2*i+gamma+delta)*(i+delta)/(2*i+gamma+delta+1)*(i+gamma-alpha+delta-beta)
        mpfr_mul(v, t10, t1, rnd);
        mpfr_div(v, v, t3, rnd);
        mpfr_mul(v, v, t2, rnd);
        mpfr_div(v, v, t4, rnd);
        mpfr_mul(v, v, t14, rnd);
        mpfr_neg(v, v, rnd);
        ft_mpfr_set_triangular_banded_index(A, v, i-2, i, rnd);
        // v = (gamma-delta)*(i+gamma+delta+1)/(2*i+gamma+delta)/(2*i+gamma+delta+2)*(i*(i+gamma+delta+1)+(gamma+delta+2)*(gamma-alpha+delta-beta)/2) - (i+gamma+delta+1)*(gamma-alpha+beta-delta)/2;
        mpfr_mul_d(v, t10, i, rnd);
        mpfr_add(v, v, t8, rnd);
        mpfr_mul(v, v, t9, rnd);
        mpfr_div(v, v, t3, rnd);
        mpfr_mul(v, v, t10, rnd);
        mpfr_div(v, v, t5, rnd);
        mpfr_sub(v, v, t15, rnd);
        ft_mpfr_set_triangular_banded_index(A, v, i-1, i, rnd);
        // v = i*(i+alpha+beta+1)*(i+gamma+delta+1)/(2*i+gamma+delta+1)*(i+gamma+delta+2)/(2*i+gamma+delta+2);
        mpfr_mul_d(v, t12, i, rnd);
        mpfr_mul(v, v, t10, rnd);
        mpfr_div(v, v, t4, rnd);
        mpfr_mul(v, v, t11, rnd);
        mpfr_div(v, v, t5, rnd);
        ft_mpfr_set_triangular_banded_index(A, v, i, i, rnd);
    }
    mpfr_clear(v);
    mpfr_clear(t1);
    mpfr_clear(t2);
    mpfr_clear(t3);
    mpfr_clear(t4);
    mpfr_clear(t5);
    mpfr_clear(t6);
    mpfr_clear(t7);
    mpfr_clear(t8);
    mpfr_clear(t9);
    mpfr_clear(t10);
    mpfr_clear(t11);
    mpfr_clear(t12);
    mpfr_clear(t13);
    mpfr_clear(t14);
    mpfr_clear(t15);
    return A;
}
// Assemble the banded matrix B (bandwidth 2) for the Jacobi connection
// eigenproblem; only the target parameters gamma and delta enter B.
// Inside the loop, t3 (= 2i+gamma+delta) and t2 (= i+gamma+delta+1) are
// deliberately kept alive across the three entry computations.
static inline ft_mpfr_triangular_banded * ft_mpfr_create_B_jacobi_to_jacobi(const int n, mpfr_srcptr gamma, mpfr_srcptr delta, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * B = ft_mpfr_calloc_triangular_banded(n, 2, prec);
    mpfr_t v, t1, t2, t3, t4, t5, t6;
    mpfr_init2(v, prec);
    mpfr_init2(t1, prec);
    mpfr_init2(t2, prec);
    mpfr_init2(t3, prec);
    mpfr_init2(t4, prec);
    mpfr_init2(t5, prec);
    mpfr_init2(t6, prec);
    if (n > 0) {
        // v = 1;
        mpfr_set_d(v, 1.0, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, 0, 0, rnd);
    }
    if (n > 1) {
        // v = (gamma-delta)/(gamma+delta+4);
        mpfr_sub(t1, gamma, delta, rnd);
        mpfr_add(t2, gamma, delta, rnd);
        mpfr_add_d(t2, t2, 4, rnd);
        mpfr_div(v, t1, t2, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, 0, 1, rnd);
        // v = (gamma+delta+2)/(gamma+delta+4);  (t2 still holds the denominator)
        mpfr_add(t1, gamma, delta, rnd);
        mpfr_add_d(t1, t1, 2, rnd);
        mpfr_div(v, t1, t2, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, 1, 1, rnd);
    }
    for (int i = 2; i < n; i++) {
        // v = -(i+gamma)/(2*i+gamma+delta)*(i+delta)/(2*i+gamma+delta+1);
        mpfr_add_d(t1, gamma, i, rnd);
        mpfr_add_d(t2, delta, i, rnd);
        mpfr_add(t3, t1, t2, rnd); // Preserve t3.  (t3 = 2*i+gamma+delta)
        mpfr_add_d(t4, t3, 1, rnd);
        mpfr_div(t5, t1, t3, rnd);
        mpfr_div(t6, t2, t4, rnd);
        mpfr_mul(v, t5, t6, rnd);
        mpfr_neg(v, v, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, i-2, i, rnd);
        // v = (gamma-delta)*(i+gamma+delta+1)/(2*i+gamma+delta)/(2*i+gamma+delta+2);
        mpfr_sub(t1, gamma, delta, rnd);
        mpfr_sub_d(t2, t3, i-1, rnd); // Preserve t2.  (t2 = i+gamma+delta+1)
        mpfr_add_d(t4, t3, 2, rnd);
        mpfr_div(t5, t1, t3, rnd);
        mpfr_div(t6, t2, t4, rnd);
        mpfr_mul(v, t5, t6, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, i-1, i, rnd);
        // v = (i+gamma+delta+1)/(2*i+gamma+delta+1)*(i+gamma+delta+2)/(2*i+gamma+delta+2);
        mpfr_add_d(t1, t3, 1, rnd);
        mpfr_add_d(t4, t2, 1, rnd);
        mpfr_add_d(t3, t3, 2, rnd);
        mpfr_div(t5, t2, t1, rnd);
        mpfr_div(t6, t4, t3, rnd);
        mpfr_mul(v, t5, t6, rnd);
        ft_mpfr_set_triangular_banded_index(B, v, i, i, rnd);
    }
    mpfr_clear(v);
    mpfr_clear(t1);
    mpfr_clear(t2);
    mpfr_clear(t3);
    mpfr_clear(t4);
    mpfr_clear(t5);
    mpfr_clear(t6);
    return B;
}
// Builds the dense n x n upper-triangular matrix V of connection
// coefficients mapping Jacobi(alpha,beta) expansion coefficients to
// Jacobi(gamma,delta) expansion coefficients.  norm1/norm2 request
// orthonormalized source/target bases.  Returns a malloc'd column-major
// array (V[i+j*n]); the caller must mpfr_clear each entry and free it.
mpfr_t * ft_mpfr_plan_jacobi_to_jacobi(const int norm1, const int norm2, const int n, mpfr_srcptr alpha, mpfr_srcptr beta, mpfr_srcptr gamma, mpfr_srcptr delta, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * A = ft_mpfr_create_A_jacobi_to_jacobi(n, alpha, beta, gamma, delta, prec, rnd);
    ft_mpfr_triangular_banded * B = ft_mpfr_create_B_jacobi_to_jacobi(n, gamma, delta, prec, rnd);
    // V starts as the identity; the generalized eigenproblem fills in the
    // upper triangle.
    mpfr_t * V = malloc(n*n*sizeof(mpfr_t));
    for (int j = 0; j < n; j++) {
        for (int i = 0; i < n; i++) {
            mpfr_init2(V[i+j*n], prec);
            mpfr_set_zero(V[i+j*n], 1);
        }
        mpfr_set_d(V[j+j*n], 1.0, rnd);
    }
    ft_mpfr_triangular_banded_eigenvectors(A, B, V, prec, rnd);
    // Row/column scalings built from the Jacobi norms; the commented
    // formulas show the recurrences evaluated with t1..t4.
    mpfr_t * sclrow = malloc(n*sizeof(mpfr_t));
    mpfr_t * sclcol = malloc(n*sizeof(mpfr_t));
    mpfr_t t1, t2, t3, t4;
    mpfr_init2(t1, prec);
    mpfr_init2(t2, prec);
    mpfr_init2(t3, prec);
    mpfr_init2(t4, prec);
    if (n > 0) {
        //sclrow[0] = norm2 ? Y2(sqrt)(Y2(pow)(2, gamma2+delta2+1)*Y2(tgamma)(gamma2+1)*Y2(tgamma)(delta2+1)/Y2(tgamma)(gamma2+delta2+2)) : 1;
        mpfr_add_d(t1, gamma, 1, rnd);
        mpfr_add_d(t2, delta, 1, rnd);
        mpfr_add(t3, t1, t2, rnd);
        mpfr_sub_d(t4, t3, 1, rnd);
        mpfr_gamma(t1, t1, rnd);
        mpfr_gamma(t2, t2, rnd);
        mpfr_gamma(t3, t3, rnd);
        mpfr_ui_pow(t4, 2, t4, rnd);
        mpfr_div(t3, t3, t1, rnd);
        mpfr_div(t3, t3, t2, rnd);
        mpfr_div(t3, t3, t4, rnd);
        // The ratio was built inverted, so rec_sqrt yields the sqrt above.
        mpfr_rec_sqrt(t3, t3, rnd);
        mpfr_init2(sclrow[0], prec);
        norm2 ? mpfr_set(sclrow[0], t3, rnd) : mpfr_set_d(sclrow[0], 1.0, rnd);
        //sclcol[0] = norm1 ? Y2(sqrt)(Y2(tgamma)(alpha2+beta2+2)/(Y2(pow)(2, alpha2+beta2+1)*Y2(tgamma)(alpha2+1)*Y2(tgamma)(beta2+1))) : 1;
        mpfr_add_d(t1, alpha, 1, rnd);
        mpfr_add_d(t2, beta, 1, rnd);
        mpfr_add(t3, t1, t2, rnd);
        mpfr_sub_d(t4, t3, 1, rnd);
        mpfr_gamma(t1, t1, rnd);
        mpfr_gamma(t2, t2, rnd);
        mpfr_gamma(t3, t3, rnd);
        mpfr_ui_pow(t4, 2, t4, rnd);
        mpfr_div(t3, t3, t1, rnd);
        mpfr_div(t3, t3, t2, rnd);
        mpfr_div(t3, t3, t4, rnd);
        mpfr_sqrt(t3, t3, rnd);
        mpfr_init2(sclcol[0], prec);
        norm1 ? mpfr_set(sclcol[0], t3, rnd) : mpfr_set_d(sclcol[0], 1.0, rnd);
    }
    if (n > 1) {
        //sclrow[1] = norm2 ? Y2(sqrt)((gamma2+1)*(delta2+1)/(gamma2+delta2+3))*sclrow[0] : 1;
        mpfr_add_d(t1, gamma, 1, rnd);
        mpfr_add_d(t2, delta, 1, rnd);
        mpfr_add(t3, t1, t2, rnd);
        mpfr_add_d(t3, t3, 1, rnd);
        mpfr_mul(t1, t1, t2, rnd);
        mpfr_div(t3, t1, t3, rnd);
        mpfr_sqrt(t3, t3, rnd);
        mpfr_init2(sclrow[1], prec);
        norm2 ? mpfr_mul(sclrow[1], t3, sclrow[0], rnd) : mpfr_set_d(sclrow[1], 1.0, rnd);
        //sclcol[1] = norm1 ? Y2(sqrt)((alpha2+beta2+3)/(alpha2+1)/(beta2+1))*(alpha2+beta2+2)/(gamma2+delta2+2)*sclcol[0] : (alpha2+beta2+2)/(gamma2+delta2+2);
        mpfr_add_d(t1, alpha, 1, rnd);
        mpfr_add_d(t2, beta, 1, rnd);
        mpfr_add(t3, t1, t2, rnd);
        mpfr_add_d(t3, t3, 1, rnd);
        mpfr_mul(t1, t1, t2, rnd);
        mpfr_div(t3, t3, t1, rnd);
        mpfr_sqrt(t3, t3, rnd);
        mpfr_add(t1, alpha, beta, rnd);
        mpfr_add_d(t1, t1, 2, rnd);
        mpfr_add(t2, gamma, delta, rnd);
        mpfr_add_d(t2, t2, 2, rnd);
        mpfr_div(t4, t1, t2, rnd);
        mpfr_mul(t4, t4, sclcol[0], rnd);
        mpfr_init2(sclcol[1], prec);
        norm1 ? mpfr_mul(sclcol[1], t3, t4, rnd) : mpfr_set(sclcol[1], t4, rnd);
    }
    for (int i = 2; i < n; i++) {
        //sclrow[i] = norm2 ? Y2(sqrt)((i+gamma2)/i*(i+delta2)/(i+gamma2+delta2)*(2*i+gamma2+delta2-1)/(2*i+gamma2+delta2+1))*sclrow[i-1] : 1;
        mpfr_add_d(t1, gamma, i, rnd);
        mpfr_add_d(t2, delta, i, rnd);
        mpfr_add(t3, t1, t2, rnd);
        mpfr_sub_d(t3, t3, 1, rnd);
        mpfr_add_d(t4, t3, 2, rnd);
        mpfr_div_d(t1, t1, i, rnd);
        mpfr_mul(t1, t1, t2, rnd);
        mpfr_add(t2, t2, gamma, rnd);
        mpfr_div(t1, t1, t2, rnd);
        mpfr_div(t3, t3, t4, rnd);
        mpfr_mul(t3, t1, t3, rnd);
        mpfr_sqrt(t3, t3, rnd);
        mpfr_init2(sclrow[i], prec);
        norm2 ? mpfr_mul(sclrow[i], t3, sclrow[i-1], rnd) : mpfr_set_d(sclrow[i], 1.0, rnd);
        //sclcol[i] = norm1 ? Y2(sqrt)(i/(i+alpha2)*(i+alpha2+beta2)/(i+beta2)*(2*i+alpha2+beta2+1)/(2*i+alpha2+beta2-1))*(2*i+alpha2+beta2-1)/(i+alpha2+beta2)*(2*i+alpha2+beta2)/(2*i+gamma2+delta2-1)*(i+gamma2+delta2)/(2*i+gamma2+delta2)*sclcol[i-1] : (2*i+alpha2+beta2-1)/(i+alpha2+beta2)*(2*i+alpha2+beta2)/(2*i+gamma2+delta2-1)*(i+gamma2+delta2)/(2*i+gamma2+delta2)*sclcol[i-1];
        mpfr_add_d(t1, alpha, i, rnd);
        mpfr_add_d(t2, beta, i, rnd);
        mpfr_add(t3, t1, t2, rnd);
        mpfr_sub_d(t3, t3, 1, rnd);
        mpfr_add_d(t4, t3, 2, rnd);
        mpfr_div_d(t1, t1, i, rnd);
        mpfr_mul(t1, t1, t2, rnd);
        mpfr_add(t2, t2, alpha, rnd);
        mpfr_div(t1, t1, t2, rnd);
        mpfr_div(t3, t3, t4, rnd);
        mpfr_mul(t3, t1, t3, rnd);
        // The square-root factor was built inverted, hence rec_sqrt.
        mpfr_rec_sqrt(t3, t3, rnd);
        mpfr_add(t2, alpha, beta, rnd);
        mpfr_add_d(t2, t2, i, rnd);
        mpfr_add_d(t1, t2, i, rnd);
        mpfr_div(t4, t1, t2, rnd);
        mpfr_sub_d(t1, t1, 1, rnd);
        mpfr_mul(t4, t1, t4, rnd);
        mpfr_add(t1, gamma, delta, rnd);
        mpfr_add_d(t1, t1, i, rnd);
        mpfr_add_d(t2, t1, i, rnd);
        mpfr_mul(t4, t1, t4, rnd);
        mpfr_div(t4, t4, t2, rnd);
        mpfr_sub_d(t2, t2, 1, rnd);
        mpfr_div(t4, t4, t2, rnd);
        mpfr_mul(t4, t4, sclcol[i-1], rnd);
        mpfr_init2(sclcol[i], prec);
        norm1 ? mpfr_mul(sclcol[i], t3, t4, rnd) : mpfr_set(sclcol[i], t4, rnd);
    }
    // Apply both scalings over the full upper triangle (no parity structure
    // in the general Jacobi case).
    for (int j = 0; j < n; j++)
        for (int i = 0; i <= j; i++) {
            //V[i+j*n] = sclrow[i]*Vl[i+j*n]*sclcol[j];
            mpfr_mul(V[i+j*n], sclrow[i], V[i+j*n], rnd);
            mpfr_mul(V[i+j*n], V[i+j*n], sclcol[j], rnd);
        }
    ft_mpfr_destroy_triangular_banded(A);
    ft_mpfr_destroy_triangular_banded(B);
    for (int i = 0; i < n; i++) {
        mpfr_clear(sclrow[i]);
        mpfr_clear(sclcol[i]);
    }
    free(sclrow);
    free(sclcol);
    mpfr_clear(t1);
    mpfr_clear(t2);
    mpfr_clear(t3);
    mpfr_clear(t4);
    return V;
}
// Assemble the bidiagonal matrix A for the Laguerre(beta) -> Laguerre(alpha)
// connection problem: A[i][i] = i and A[i-1][i] = alpha - beta - i.
static inline ft_mpfr_triangular_banded * ft_mpfr_create_A_laguerre_to_laguerre(const int n, mpfr_srcptr alpha, mpfr_srcptr beta, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * A = ft_mpfr_calloc_triangular_banded(n, 1, prec);
    mpfr_t entry;
    mpfr_init2(entry, prec);
    for (int i = 0; i < n; i++) {
        // Diagonal: A[i][i] = i.
        mpfr_set_d(entry, i, rnd);
        ft_mpfr_set_triangular_banded_index(A, entry, i, i, rnd);
        // Superdiagonal: A[i-1][i] = alpha - beta - i.  At i == 0 the row
        // index is -1, matching the original's call pattern (presumably the
        // banded setter discards out-of-band indices).
        mpfr_sub(entry, alpha, beta, rnd);
        mpfr_sub_d(entry, entry, i, rnd);
        ft_mpfr_set_triangular_banded_index(A, entry, i-1, i, rnd);
    }
    mpfr_clear(entry);
    return A;
}
// Assemble the bidiagonal matrix B for the Laguerre-to-Laguerre connection
// problem: ones on the diagonal, minus ones on the first superdiagonal.
static inline ft_mpfr_triangular_banded * ft_mpfr_create_B_laguerre_to_laguerre(const int n, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * B = ft_mpfr_calloc_triangular_banded(n, 1, prec);
    // Both entries are exact constants, so they are set once and reused.
    mpfr_t one, minus_one;
    mpfr_init2(one, prec);
    mpfr_init2(minus_one, prec);
    mpfr_set_d(one, 1.0, rnd);
    mpfr_set_d(minus_one, -1.0, rnd);
    for (int i = 0; i < n; i++) {
        ft_mpfr_set_triangular_banded_index(B, minus_one, i-1, i, rnd);
        ft_mpfr_set_triangular_banded_index(B, one, i, i, rnd);
    }
    mpfr_clear(one);
    mpfr_clear(minus_one);
    return B;
}
// Builds the dense n x n upper-triangular matrix V mapping Laguerre(alpha)
// expansion coefficients to Laguerre(beta) expansion coefficients.
// norm1/norm2 request orthonormalized source/target bases.  Returns a
// malloc'd column-major array (V[i+j*n]); the caller clears and frees it.
mpfr_t * ft_mpfr_plan_laguerre_to_laguerre(const int norm1, const int norm2, const int n, mpfr_srcptr alpha, mpfr_srcptr beta, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    ft_mpfr_triangular_banded * A = ft_mpfr_create_A_laguerre_to_laguerre(n, alpha, beta, prec, rnd);
    ft_mpfr_triangular_banded * B = ft_mpfr_create_B_laguerre_to_laguerre(n, prec, rnd);
    // V starts as the identity; the generalized eigenproblem fills in the
    // upper triangle.
    mpfr_t * V = malloc(n*n*sizeof(mpfr_t));
    for (int j = 0; j < n; j++) {
        for (int i = 0; i < n; i++) {
            mpfr_init2(V[i+j*n], prec);
            mpfr_set_zero(V[i+j*n], 1);
        }
        mpfr_set_d(V[j+j*n], 1.0, rnd);
    }
    ft_mpfr_triangular_banded_eigenvectors(A, B, V, prec, rnd);
    // Row/column scalings from the Laguerre norms sqrt(Gamma(param+1)) and
    // the running ratios in the commented recurrences.
    mpfr_t * sclrow = malloc(n*sizeof(mpfr_t));
    mpfr_t * sclcol = malloc(n*sizeof(mpfr_t));
    mpfr_t t1, t2, t3;
    mpfr_init2(t1, prec);
    mpfr_init2(t2, prec);
    mpfr_init2(t3, prec);
    if (n > 0) {
        //sclrow[0] = norm2 ? Y2(sqrt)(Y2(tgamma)(beta2+1)) : 1;
        mpfr_add_d(t1, beta, 1.0, rnd);
        mpfr_gamma(t2, t1, rnd);
        mpfr_sqrt(t3, t2, rnd);
        mpfr_init2(sclrow[0], prec);
        norm2 ? mpfr_set(sclrow[0], t3, rnd) : mpfr_set_d(sclrow[0], 1.0, rnd);
        //sclcol[0] = norm1 ? 1/Y2(sqrt)(Y2(tgamma)(alpha2+1)) : 1;
        mpfr_add_d(t1, alpha, 1.0, rnd);
        mpfr_gamma(t2, t1, rnd);
        mpfr_rec_sqrt(t3, t2, rnd);
        mpfr_init2(sclcol[0], prec);
        norm1 ? mpfr_set(sclcol[0], t3, rnd) : mpfr_set_d(sclcol[0], 1.0, rnd);
    }
    for (int i = 1; i < n; i++) {
        //sclrow[i] = norm2 ? Y2(sqrt)((i+beta2)/i)*sclrow[i-1] : 1;
        mpfr_add_d(t1, beta, i, rnd);
        mpfr_div_d(t2, t1, i, rnd);
        mpfr_sqrt(t3, t2, rnd);
        mpfr_init2(sclrow[i], prec);
        norm2 ? mpfr_mul(sclrow[i], t3, sclrow[i-1], rnd) : mpfr_set_d(sclrow[i], 1.0, rnd);
        //sclcol[i] = norm1 ? Y2(sqrt)(i/(i+alpha2))*sclcol[i-1] : 1;
        mpfr_add_d(t1, alpha, i, rnd);
        mpfr_d_div(t2, i, t1, rnd);
        mpfr_sqrt(t3, t2, rnd);
        mpfr_init2(sclcol[i], prec);
        norm1 ? mpfr_mul(sclcol[i], t3, sclcol[i-1], rnd) : mpfr_set_d(sclcol[i], 1.0, rnd);
    }
    // Apply both scalings over the full upper triangle.
    for (int j = 0; j < n; j++)
        for (int i = 0; i <= j; i++) {
            //V[i+j*n] = sclrow[i]*Vl[i+j*n]*sclcol[j];
            mpfr_mul(V[i+j*n], sclrow[i], V[i+j*n], rnd);
            mpfr_mul(V[i+j*n], V[i+j*n], sclcol[j], rnd);
        }
    ft_mpfr_destroy_triangular_banded(A);
    ft_mpfr_destroy_triangular_banded(B);
    for (int i = 0; i < n; i++) {
        mpfr_clear(sclrow[i]);
        mpfr_clear(sclcol[i]);
    }
    free(sclrow);
    free(sclcol);
    mpfr_clear(t1);
    mpfr_clear(t2);
    mpfr_clear(t3);
    return V;
}
// Builds the plan converting Jacobi(alpha,beta) coefficients to
// ultraspherical(lambda) coefficients by delegating to the Jacobi-to-Jacobi
// plan with gamma = delta = lambda - 1/2, then (when the classical,
// non-orthonormal ultraspherical target is requested) rescaling each row by
// the ratio between the Jacobi P^(lambda-1/2,lambda-1/2) and Gegenbauer
// C^lambda normalizations.
mpfr_t * ft_mpfr_plan_jacobi_to_ultraspherical(const int normjac, const int normultra, const int n, mpfr_srcptr alpha, mpfr_srcptr beta, mpfr_srcptr lambda, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    mpfr_t t1;
    mpfr_init2(t1, prec);
    mpfr_sub_d(t1, lambda, 0.5, rnd);
    mpfr_t * V = ft_mpfr_plan_jacobi_to_jacobi(normjac, normultra, n, alpha, beta, t1, t1, prec, rnd);
    if (normultra == 0) {
        mpfr_t * sclrow = malloc(n*sizeof(mpfr_t));
        if (n > 0) {
            mpfr_init2(sclrow[0], prec);
            mpfr_set_d(sclrow[0], 1.0, rnd);
        }
        mpfr_t t2;
        mpfr_init2(t2, prec);
        mpfr_mul_d(t2, lambda, 2, rnd);
        mpfr_sub_d(t2, t2, 1, rnd);
        // Running terms: t1 = lambda+i-1/2 (reused from above) and
        // t2 = 2*lambda+i-1, each incremented once per iteration.
        for (int i = 1; i < n; i++) {
            //sclrow[i] = (lambda+i-0.5)/(2*lambda+i-1)*sclrow[i-1];
            mpfr_add_d(t1, t1, 1, rnd);
            mpfr_add_d(t2, t2, 1, rnd);
            mpfr_init2(sclrow[i], prec);
            mpfr_div(sclrow[i], t1, t2, rnd);
            mpfr_mul(sclrow[i], sclrow[i], sclrow[i-1], rnd);
        }
        for (int j = 0; j < n; j++)
            for (int i = 0; i <= j; i++)
                //V[i+j*n] *= sclrow[i];
                mpfr_mul(V[i+j*n], V[i+j*n], sclrow[i], rnd);
        for (int i = 0; i < n; i++)
            mpfr_clear(sclrow[i]);
        free(sclrow);
        mpfr_clear(t2);
    }
    mpfr_clear(t1);
    return V;
}
// Builds the plan converting ultraspherical(lambda) coefficients to
// Jacobi(alpha,beta) coefficients by delegating to the Jacobi-to-Jacobi
// plan with source parameters lambda - 1/2, then (for the classical,
// non-orthonormal ultraspherical source) rescaling each column by the
// inverse of the Gegenbauer/Jacobi normalization ratio.
mpfr_t * ft_mpfr_plan_ultraspherical_to_jacobi(const int normultra, const int normjac, const int n, mpfr_srcptr lambda, mpfr_srcptr alpha, mpfr_srcptr beta, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    mpfr_t t1;
    mpfr_init2(t1, prec);
    mpfr_sub_d(t1, lambda, 0.5, rnd);
    mpfr_t * V = ft_mpfr_plan_jacobi_to_jacobi(normultra, normjac, n, t1, t1, alpha, beta, prec, rnd);
    if (normultra == 0) {
        mpfr_t * sclcol = malloc(n*sizeof(mpfr_t));
        if (n > 0) {
            mpfr_init2(sclcol[0], prec);
            mpfr_set_d(sclcol[0], 1.0, rnd);
        }
        mpfr_t t2;
        mpfr_init2(t2, prec);
        mpfr_mul_d(t2, lambda, 2, rnd);
        mpfr_sub_d(t2, t2, 1, rnd);
        // Running terms: t1 = lambda+i-1/2 (reused from above) and
        // t2 = 2*lambda+i-1, each incremented once per iteration.
        for (int i = 1; i < n; i++) {
            //sclcol[i] = (2*lambda+i-1)/(lambda+i-0.5)*sclcol[i-1];
            mpfr_add_d(t1, t1, 1, rnd);
            mpfr_add_d(t2, t2, 1, rnd);
            mpfr_init2(sclcol[i], prec);
            mpfr_div(sclcol[i], t2, t1, rnd);
            mpfr_mul(sclcol[i], sclcol[i], sclcol[i-1], rnd);
        }
        for (int j = 0; j < n; j++)
            for (int i = 0; i <= j; i++)
                //V[i+j*n] *= sclcol[j];
                mpfr_mul(V[i+j*n], V[i+j*n], sclcol[j], rnd);
        for (int i = 0; i < n; i++)
            mpfr_clear(sclcol[i]);
        free(sclcol);
        mpfr_clear(t2);
    }
    mpfr_clear(t1);
    return V;
}
// Builds the plan converting Jacobi(alpha,beta) coefficients to Chebyshev
// coefficients via the Jacobi-to-Jacobi plan with gamma = delta = -1/2 and
// an orthonormalized target, then rescales rows to the classical Chebyshev
// normalization when normcheb == 0.
mpfr_t * ft_mpfr_plan_jacobi_to_chebyshev(const int normjac, const int normcheb, const int n, mpfr_srcptr alpha, mpfr_srcptr beta, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    mpfr_t t1;
    mpfr_init2(t1, prec);
    mpfr_set_d(t1, -0.5, rnd);
    mpfr_t * V = ft_mpfr_plan_jacobi_to_jacobi(normjac, 1, n, alpha, beta, t1, t1, prec, rnd);
    if (normcheb == 0) {
        mpfr_t sqrt_1_pi, sqrt_2_pi;
        // t1 flips from -1/2 to 1/2; Gamma(1/2) = sqrt(pi), so
        // sqrt_1_pi = 1/sqrt(pi) (T_0 scaling) and
        // sqrt_2_pi = (1/sqrt(pi))/sqrt(1/2) = sqrt(2/pi) (T_{k>=1} scaling).
        mpfr_neg(t1, t1, rnd);
        mpfr_init2(sqrt_1_pi, prec);
        mpfr_gamma(sqrt_1_pi, t1, rnd);
        mpfr_d_div(sqrt_1_pi, 1.0, sqrt_1_pi, rnd);
        mpfr_init2(sqrt_2_pi, prec);
        mpfr_sqrt(sqrt_2_pi, t1, rnd);
        mpfr_div(sqrt_2_pi, sqrt_1_pi, sqrt_2_pi, rnd);
        mpfr_t * sclrow = malloc(n*sizeof(mpfr_t));
        for (int i = 0; i < n; i++) {
            mpfr_init2(sclrow[i], prec);
            i ? mpfr_set(sclrow[i], sqrt_2_pi, rnd) : mpfr_set(sclrow[i], sqrt_1_pi, rnd);
        }
        for (int j = 0; j < n; j++)
            for (int i = 0; i <= j; i++)
                mpfr_mul(V[i+j*n], V[i+j*n], sclrow[i], rnd);
        for (int i = 0; i < n; i++)
            mpfr_clear(sclrow[i]);
        free(sclrow);
        mpfr_clear(sqrt_1_pi);
        mpfr_clear(sqrt_2_pi);
    }
    mpfr_clear(t1);
    return V;
}
// Builds the plan converting Chebyshev coefficients to Jacobi(alpha,beta)
// coefficients via the Jacobi-to-Jacobi plan with alpha = beta = -1/2 and
// an orthonormalized source, then rescales columns back to the classical
// Chebyshev normalization when normcheb == 0.
mpfr_t * ft_mpfr_plan_chebyshev_to_jacobi(const int normcheb, const int normjac, const int n, mpfr_srcptr alpha, mpfr_srcptr beta, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    mpfr_t t1;
    mpfr_init2(t1, prec);
    mpfr_set_d(t1, -0.5, rnd);
    mpfr_t * V = ft_mpfr_plan_jacobi_to_jacobi(1, normjac, n, t1, t1, alpha, beta, prec, rnd);
    if (normcheb == 0) {
        mpfr_t sqrtpi, sqrtpi2;
        // t1 flips from -1/2 to 1/2; sqrtpi = Gamma(1/2) = sqrt(pi) scales
        // the T_0 column and sqrtpi2 = sqrt(pi)*sqrt(1/2) = sqrt(pi/2)
        // scales columns k >= 1 (inverses of the jacobi_to_chebyshev case).
        mpfr_neg(t1, t1, rnd);
        mpfr_init2(sqrtpi, prec);
        mpfr_gamma(sqrtpi, t1, rnd);
        mpfr_init2(sqrtpi2, prec);
        mpfr_sqrt(sqrtpi2, t1, rnd);
        mpfr_mul(sqrtpi2, sqrtpi, sqrtpi2, rnd);
        mpfr_t * sclcol = malloc(n*sizeof(mpfr_t));
        for (int i = 0; i < n; i++) {
            mpfr_init2(sclcol[i], prec);
            i ? mpfr_set(sclcol[i], sqrtpi2, rnd) : mpfr_set(sclcol[i], sqrtpi, rnd);
        }
        for (int j = 0; j < n; j++)
            for (int i = 0; i <= j; i++)
                mpfr_mul(V[i+j*n], V[i+j*n], sclcol[j], rnd);
        for (int i = 0; i < n; i++)
            mpfr_clear(sclcol[i]);
        free(sclcol);
        mpfr_clear(sqrtpi);
        mpfr_clear(sqrtpi2);
    }
    mpfr_clear(t1);
    return V;
}
// Builds the plan converting ultraspherical(lambda) coefficients to
// Chebyshev coefficients via the ultraspherical-to-Jacobi plan with
// alpha = beta = -1/2 (orthonormalized target), then rescales rows to the
// classical Chebyshev normalization when normcheb == 0.
mpfr_t * ft_mpfr_plan_ultraspherical_to_chebyshev(const int normultra, const int normcheb, const int n, mpfr_srcptr lambda, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    mpfr_t t1;
    mpfr_init2(t1, prec);
    mpfr_set_d(t1, -0.5, rnd);
    mpfr_t * V = ft_mpfr_plan_ultraspherical_to_jacobi(normultra, 1, n, lambda, t1, t1, prec, rnd);
    if (normcheb == 0) {
        mpfr_t sqrt_1_pi, sqrt_2_pi;
        // t1 flips to 1/2: sqrt_1_pi = 1/Gamma(1/2) = 1/sqrt(pi) for row 0,
        // sqrt_2_pi = sqrt(2/pi) for rows k >= 1.
        mpfr_neg(t1, t1, rnd);
        mpfr_init2(sqrt_1_pi, prec);
        mpfr_gamma(sqrt_1_pi, t1, rnd);
        mpfr_d_div(sqrt_1_pi, 1.0, sqrt_1_pi, rnd);
        mpfr_init2(sqrt_2_pi, prec);
        mpfr_sqrt(sqrt_2_pi, t1, rnd);
        mpfr_div(sqrt_2_pi, sqrt_1_pi, sqrt_2_pi, rnd);
        mpfr_t * sclrow = malloc(n*sizeof(mpfr_t));
        for (int i = 0; i < n; i++) {
            mpfr_init2(sclrow[i], prec);
            i ? mpfr_set(sclrow[i], sqrt_2_pi, rnd) : mpfr_set(sclrow[i], sqrt_1_pi, rnd);
        }
        // Stride-2 row loop: the ultraspherical-to-Chebyshev matrix only has
        // nonzero entries where i and j share parity.
        for (int j = 0; j < n; j++)
            for (int i = j; i >= 0; i -= 2)
                mpfr_mul(V[i+j*n], V[i+j*n], sclrow[i], rnd);
        for (int i = 0; i < n; i++)
            mpfr_clear(sclrow[i]);
        free(sclrow);
        mpfr_clear(sqrt_1_pi);
        mpfr_clear(sqrt_2_pi);
    }
    mpfr_clear(t1);
    return V;
}
// Builds the plan converting Chebyshev coefficients to ultraspherical(lambda)
// coefficients via the Jacobi-to-ultraspherical plan with
// alpha = beta = -1/2 (orthonormalized source), then rescales columns back
// to the classical Chebyshev normalization when normcheb == 0.
mpfr_t * ft_mpfr_plan_chebyshev_to_ultraspherical(const int normcheb, const int normultra, const int n, mpfr_srcptr lambda, mpfr_prec_t prec, mpfr_rnd_t rnd) {
    mpfr_t t1;
    mpfr_init2(t1, prec);
    mpfr_set_d(t1, -0.5, rnd);
    mpfr_t * V = ft_mpfr_plan_jacobi_to_ultraspherical(1, normultra, n, t1, t1, lambda, prec, rnd);
    if (normcheb == 0) {
        mpfr_t sqrtpi, sqrtpi2;
        // t1 flips to 1/2: sqrtpi = Gamma(1/2) = sqrt(pi) for column 0,
        // sqrtpi2 = sqrt(pi)*sqrt(1/2) = sqrt(pi/2) for columns k >= 1.
        mpfr_neg(t1, t1, rnd);
        mpfr_init2(sqrtpi, prec);
        mpfr_gamma(sqrtpi, t1, rnd);
        mpfr_init2(sqrtpi2, prec);
        mpfr_sqrt(sqrtpi2, t1, rnd);
        mpfr_mul(sqrtpi2, sqrtpi, sqrtpi2, rnd);
        mpfr_t * sclcol = malloc(n*sizeof(mpfr_t));
        for (int i = 0; i < n; i++) {
            mpfr_init2(sclcol[i], prec);
            i ? mpfr_set(sclcol[i], sqrtpi2, rnd) : mpfr_set(sclcol[i], sqrtpi, rnd);
        }
        // Stride-2 row loop over the parity-structured nonzero pattern.
        for (int j = 0; j < n; j++)
            for (int i = j; i >= 0; i -= 2)
                mpfr_mul(V[i+j*n], V[i+j*n], sclcol[j], rnd);
        for (int i = 0; i < n; i++)
            mpfr_clear(sclcol[i]);
        free(sclcol);
        mpfr_clear(sqrtpi);
        mpfr_clear(sqrtpi2);
    }
    mpfr_clear(t1);
    return V;
}
|
conv3x3s1_winograd64_transform_kernel_pack4_neon_GgG.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
// Computes the Winograd F(6,3) kernel transform G * g * G^T for every
// (output-channel, input-channel) pair of a 3x3 convolution kernel, yielding
// one 8x8 transformed tile per filter.
// NOTE(review): kernel_tm is copy-constructed from kernel_tm_pack4 and then
// create()'d.  The transform lands in kernel_tm_pack4's buffer only if the
// caller pre-allocated it with exactly this (64, inch, outch) shape so that
// create() is a no-op on shared data; otherwise the result is written to a
// detached allocation and the output parameter stays untouched -- TODO
// confirm against call sites.
static void conv3x3s1_winograd64_transform_kernel_pack4_neon_GgG(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
    // winograd63 transform kernel
    Mat kernel_tm = kernel_tm_pack4;
    kernel_tm.create(8*8, inch, outch);
    // G matrix for F(6x6, 3x3), stored row-per-output: row i holds the three
    // weights that combine the kernel taps into the i-th transformed value.
    const float ktm[8][3] = {
        { 1.0f, 0.0f, 0.0f},
        {-2.0f/9, -2.0f/9, -2.0f/9},
        {-2.0f/9, 2.0f/9, -2.0f/9},
        {1.0f/90, 1.0f/45, 2.0f/45},
        {1.0f/90, -1.0f/45, 2.0f/45},
        {1.0f/45, 1.0f/90, 1.0f/180},
        {1.0f/45, -1.0f/90, 1.0f/180},
        { 0.0f, 0.0f, 1.0f}
    };
#pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            // g: the 3x3 kernel for output channel p, input channel q,
            // stored as three consecutive rows k0/k1/k2.
            const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);
            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;
            // h : tmp = (G*g)^T, an 8x3 intermediate
            float tmp[8][3];
            for (int i=0; i<8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }
            // v : apply G on the other side to finish G*g*G^T (row-major 8x8)
            for (int j=0; j<8; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i=0; i<8; i++)
                {
                    kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
}
}
|
GraphMatRuntime.h | /******************************************************************************
** Copyright (c) 2015, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ******************************************************************************/
/* Narayanan Sundaram (Intel Corp.)
* ******************************************************************************/
#include "GMDP/gmdp.h"
//#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <vector>
#include <utility>
#include <sys/time.h>
#ifdef __ASSERT
#include <assert.h>
#endif
#include "Graph.h"
#include "GraphProgram.h"
#include "SPMV.h"
namespace GraphMat {
const int UNTIL_CONVERGENCE = -1;
// Reusable scratch state for run_graph_program(): holding these two sparse
// vectors here lets callers amortize their allocation across many calls
// (see graph_program_init / graph_program_clear).
template<class T, class U, class V>
struct run_graph_program_temp_structure {
GraphMat::SpVec<GraphMat::DenseSegment<T> >* px; // per-vertex outgoing messages (message type T)
GraphMat::SpVec<GraphMat::DenseSegment<U> >* py; // per-vertex reduced incoming messages (reduction type U)
};
template<class T, class U, class V, class E>
struct run_graph_program_temp_structure<T,U,V> graph_program_init(const GraphProgram<T,U,V,E>& gp, const Graph<V, E>& g) {
struct run_graph_program_temp_structure<T,U,V> rgpts;
rgpts.px = new GraphMat::SpVec<GraphMat::DenseSegment<T> >(g.nvertices, GraphMat::get_global_nrank(), GraphMat::vector_partition_fn);
T _t;
rgpts.px->setAll(_t);
rgpts.py = new GraphMat::SpVec<GraphMat::DenseSegment<U> >(g.nvertices, GraphMat::get_global_nrank(), GraphMat::vector_partition_fn);
U _u;
rgpts.py->setAll(_u);
return rgpts;
}
// Releases the scratch vectors allocated by graph_program_init().
// The pointers are left dangling; do not reuse the structure afterwards.
template<class T, class U, class V>
void graph_program_clear(struct run_graph_program_temp_structure<T,U,V>& rgpts) {
delete rgpts.py;
delete rgpts.px;
}
template <class T,class U, class V, class E>
void send_message(const bool& a, const V& _v, T* b, void* gpv) {
GraphProgram<T,U,V,E>* gp = (GraphProgram<T,U,V,E>*) gpv;
if(a == true) {
gp->send_message(_v, *b);
}
}
template <class T, class U, class V, class E>
void apply_func(const U& y, V* b, void* gpv) {
GraphProgram<T,U,V,E>* gp = (GraphProgram<T,U,V,E>*) gpv;
gp->apply(y, *b);
}
// Runs the vertex program gp on graph g for up to `iterations` rounds.
// Pass iterations <= 0 (e.g. UNTIL_CONVERGENCE) to iterate until no vertex
// property changes on any MPI rank. Each round: (1) active vertices emit
// messages into x, (2) an SpMV along the chosen edge direction reduces them
// into y, (3) apply() updates vertex properties and re-activates changed
// vertices, (4) an MPI all-reduce decides global convergence.
// If rgpts is NULL the scratch vectors are allocated and freed locally;
// otherwise the caller-provided scratch from graph_program_init() is reused.
template <class T, typename U, class V, class E>
void run_graph_program(GraphProgram<T,U,V,E>* gp, Graph<V,E>& g, int iterations=1, struct run_graph_program_temp_structure<T,U,V>* rgpts=NULL) { //iterations = -1 ==> until convergence
int it = 0;
int converged = 1;
struct timeval start, end, init_start, init_end, iteration_start, iteration_end;
double time;
int global_myrank = GraphMat::get_global_myrank();
gettimeofday(&init_start, 0);
auto act = gp->getActivity();
// Scratch vectors: allocated here only when the caller did not supply rgpts.
GraphMat::SpVec<GraphMat::DenseSegment<T> >* px;
GraphMat::SpVec<GraphMat::DenseSegment<U> >* py;
if (rgpts == NULL) {
px = new GraphMat::SpVec<GraphMat::DenseSegment<T> >(g.nvertices, GraphMat::get_global_nrank(), GraphMat::vector_partition_fn);
T _t;
px->setAll(_t);
py = new GraphMat::SpVec<GraphMat::DenseSegment<U> >(g.nvertices, GraphMat::get_global_nrank(), GraphMat::vector_partition_fn);
U _u;
py->setAll(_u);
}
// x/y alias whichever scratch pair is in use for the rest of the function.
GraphMat::SpVec<GraphMat::DenseSegment<T> >& x = (rgpts==NULL)?(*px):*(rgpts->px);//*px;
GraphMat::SpVec<GraphMat::DenseSegment<U> >& y = (rgpts==NULL)?(*py):*(rgpts->py);//*py;
if (act == ALL_VERTICES) {
g.setAllActive();
}
#ifdef __TIMING
printf("Nvertices = %d \n", g.getNumberOfVertices());
#endif
gettimeofday(&init_end, 0);
#ifdef __TIMING
time = (init_end.tv_sec-init_start.tv_sec)*1e3+(init_end.tv_usec-init_start.tv_usec)*1e-3;
printf("GraphMat init time = %f ms \n", time);
#endif
// Main iteration loop; exit conditions are at the bottom.
while(1) {
gettimeofday(&iteration_start, 0);
GraphMat::Clear(&x);
GraphMat::Clear(&y);
converged = 1;
gettimeofday(&start, 0);
// Phase 1: every active vertex emits its message into x.
GraphMat::IntersectReduce(g.active, g.vertexproperty, &x, send_message<T,U,V,E>, (void*)gp);
#ifdef __TIMING
printf("x.length = %d \n", x.getNNZ());
#endif
gettimeofday(&end, 0);
#ifdef __TIMING
time = (end.tv_sec-start.tv_sec)*1e3+(end.tv_usec-start.tv_usec)*1e-3;
printf("Send message time = %.3f ms \n", time);
#endif
gettimeofday(&start, 0);
//do SpMV
// Phase 2: propagate messages along edges. ALL_EDGES does both directions.
if (gp->getOrder() == OUT_EDGES) {
SpMTSpV(g, gp, &x, &y);
} else if (gp->getOrder() == IN_EDGES) {
SpMSpV(g, gp, &x, &y);
} else if (gp->getOrder() == ALL_EDGES) {
SpMTSpV(g, gp, &x, &y);
SpMSpV(g, gp, &x, &y);
} else {
printf("Unrecognized option \n");
exit(1);
}
gettimeofday(&end, 0);
#ifdef __TIMING
time = (end.tv_sec-start.tv_sec)*1e3+(end.tv_usec-start.tv_usec)*1e-3;
printf("SPMV time = %.3f ms \n", time);
#endif
gettimeofday(&start, 0);
g.setAllInactive();
//update state and activity and check for convergence if needed
int nout = 0;
int total_search = 0;
int local_converged = 1;
converged = 1;
//GraphMat::IntersectReduce(g.active, y, &g.vertexproperty, set_y<U,V>);
//auto apply_func = set_y_apply<U,V>;
//GraphMat::Apply(y, &g.vertexproperty, apply_func<T,U,V>, (void*)gp);
// Phase 3: walk the bit-vector of y's locally-owned segments; for each set
// bit call apply() and re-activate the vertex if its property changed.
for(int segmentId = 0 ; segmentId < y.nsegments ; segmentId++)
{
if(y.nodeIds[segmentId] == global_myrank)
{
auto segment = y.segments[segmentId]->properties;
auto vpValueArray = g.vertexproperty->segments[segmentId]->properties->value;
#pragma omp parallel for reduction(&:local_converged)
for (int i = 0; i < y.segments[segmentId]->num_ints; i++) {
unsigned int value = segment->bit_vector[i];
// Iterate over the set bits of this 32-bit word, lowest first.
while (value != 0) {
int last_bit = _bit_scan_forward(value);
int idx = i*32 + last_bit;
V old_prop;
//old_prop = g.vertexproperty.segments[segmentId].properties->value[idx];
old_prop = vpValueArray[idx];
//gp->apply(segment->value[idx], g.vertexproperty.segments[segmentId].properties->value[idx]);
gp->apply(segment->value[idx], vpValueArray[idx]);
if (old_prop != vpValueArray[idx]) {
g.active->segments[segmentId]->properties->value[idx] = true;
GraphMat::set_bitvector(idx, g.active->segments[segmentId]->properties->bit_vector);
local_converged = 0;
}
value &= (~(1<<last_bit));
}
}
}
}
// Phase 4: global convergence = logical AND of every rank's local flag.
MPI_Allreduce(&local_converged, &converged, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
gettimeofday(&end, 0);
#ifdef __TIMING
time = (end.tv_sec-start.tv_sec)*1e3+(end.tv_usec-start.tv_usec)*1e-3;
printf("Apply time = %.3f ms \n", time);
#endif
gettimeofday(&start, 0);
gp->do_every_iteration(it);
#ifdef __TIMING
gettimeofday(&end, 0);
time = (end.tv_sec-start.tv_sec)*1e3+(end.tv_usec-start.tv_usec)*1e-3;
printf("Do every iteration time = %.3f ms \n", time);
#endif
gettimeofday(&iteration_end, 0);
#ifdef __TIMING
time = (iteration_end.tv_sec-iteration_start.tv_sec)*1e3+(iteration_end.tv_usec-iteration_start.tv_usec)*1e-3;
printf("Iteration %d :: %f msec :: updated %d vertices :: changed %d vertices \n", it, time, y.getNNZ(), g.active->getNNZ());
#endif
if (act == ALL_VERTICES) {
g.setAllActive();
}
it++;
// Stop after the requested number of rounds, or (when iterations <= 0)
// as soon as every rank reports convergence.
if (it == iterations) {
break;
}
if (iterations <= 0 && converged == 1) {
break;
}
}
struct timeval clear_start, clear_end;
gettimeofday(&clear_start, 0);
// Only free scratch we allocated ourselves; caller-owned rgpts is kept.
if (rgpts == NULL) {
delete px;
delete py;
}
gettimeofday(&clear_end, 0);
#ifdef __TIMING
time = (clear_end.tv_sec-clear_start.tv_sec)*1e3+(clear_end.tv_usec-clear_start.tv_usec)*1e-3;
printf("GraphMat clear time = %f msec \n", time);
#endif
printf("Completed %d iterations \n", it);
}
} //namespace GraphMat
|
GB_binop__min_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__min_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__min_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__min_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_uint16)
// A*D function (colscale): GB (_AxD__min_uint16)
// D*A function (rowscale): GB (_DxB__min_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__min_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__min_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_uint16)
// C=scalar+B GB (_bind1st__min_uint16)
// C=scalar+B' GB (_bind1st_tran__min_uint16)
// C=A+scalar GB (_bind2nd__min_uint16)
// C=A'+scalar GB (_bind2nd_tran__min_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_IMIN (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMIN (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_UINT16 || GxB_NO_MIN_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A and B are all dense (auto-generated MIN/uint16 kernel;
// the loop body lives in the template). Note: unlike the sibling kernels this
// one returns void and has no GB_DISABLE fallback.
void GB (_Cdense_ewise3_accum__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; returns GrB_NO_VALUE when this
// operator/type combination is compiled out (GB_DISABLE) so the caller can
// fall back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C with MIN, using the precomputed
// slicing of B's entries across B_ntasks tasks / B_nthreads threads.
GrB_Info GB (_Cdense_accumB__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (passed type-erased via p_bwork) into every
// entry of dense C with MIN.
GrB_Info GB (_Cdense_accumb__min_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable — the inner block above always returns first. Harmless
// artifact of the code generator; kept because this file is auto-generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by diagonal matrix D with MIN; C has the
// same pattern as A, so only C->x is written here.
GrB_Info GB (_AxD__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by diagonal matrix D with MIN; C has the same
// pattern as B, so only C->x is written here.
GrB_Info GB (_DxB__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of patterns, applying MIN
// where both entries exist. The C_to_* maps and TaskList come from the
// symbolic phase; the workspaces declared below are freed by GB_FREE_WORK.
GrB_Info GB (_AaddB__min_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B over the intersection of
// patterns, applying MIN to each matching pair of entries.
GrB_Info GB (_AemultB_01__min_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hypersparse and B is
// bitmap/full. GB_BINOP_FLIP is 0 for MIN (commutative), so only the
// unflipped template instantiation below is compiled in.
GrB_Info GB (_AemultB_02__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B when M is sparse/hypersparse and both
// A and B are bitmap/full; iterates over M's pattern.
GrB_Info GB (_AemultB_03__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B when the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__min_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = min (x, Bx [p]) for every entry present in B: MIN with the first
// argument bound to a scalar. Cx and Bx may alias. Bb is B's bitmap (entries
// with a clear bit are skipped); anz is the size of the data arrays.
GrB_Info GB (_bind1st__min_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Bx = (uint16_t *) Bx_input ;
uint16_t scalar = (*((uint16_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (GBB (Bb, k))
{
uint16_t bval = Bx [k] ;
Cx [k] = GB_IMIN (scalar, bval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = min (Ax [p], y) for every entry present in A: MIN with the second
// argument bound to a scalar. Cx and Ax may alias. Ab is A's bitmap (entries
// with a clear bit are skipped); anz is the size of the data arrays.
GrB_Info GB (_bind2nd__min_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t scalar = (*((uint16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (GBB (Ab, k))
{
uint16_t aval = Ax [k] ;
Cx [k] = GB_IMIN (aval, scalar) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
// C = min (x, A'): transpose A and apply MIN with the first argument bound to
// a scalar, via the GB_CAST_OP defined just above this function.
GrB_Info GB (_bind1st_tran__min_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (preprocessor-time only; this
// is not unreachable code, just macro bookkeeping by the generator)
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
// C = min (A', y): transpose A and apply MIN with the second argument bound
// to a scalar, via the GB_CAST_OP defined just above this function.
GrB_Info GB (_bind2nd_tran__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__ainv_uint8_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_bool
// op(A') function: GB_tran__ainv_uint8_bool
// C type: uint8_t
// A type: bool
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -((uint8_t) Ax [p]) for all anz entries: additive inverse after a
// bool -> uint8_t cast. Note true maps to 255 (unsigned wrap-around), which
// is the intended modular additive inverse for unsigned types.
GrB_Info GB_unop__ainv_uint8_bool
(
uint8_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -(uint8_t) A': transpose, typecast bool -> uint8_t, and apply the
// additive-inverse operator (numeric phase 2 of the sliced transpose).
GrB_Info GB_tran__ainv_uint8_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__ainv_uint8_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_uint16
// op(A') function: GB_tran__ainv_uint8_uint16
// C type: uint8_t
// A type: uint16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -((uint8_t) Ax [p]) for all anz entries: uint16_t -> uint8_t
// truncating cast followed by the (modular) additive inverse.
GrB_Info GB_unop__ainv_uint8_uint16
(
uint8_t *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -(uint8_t) A': transpose, typecast uint16_t -> uint8_t, and apply the
// additive-inverse operator (numeric phase 2 of the sliced transpose).
GrB_Info GB_tran__ainv_uint8_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
icv-2.c | /* { dg-do run { target *-*-linux* *-*-gnu* *-*-freebsd* } } */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1
#endif
#include <pthread.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
pthread_barrier_t bar;
// Thread body run concurrently on two pthreads; p selects the expected
// nthreads-var value (non-NULL -> 3, NULL -> 6). The two barriers force the
// other thread's omp_set_num_threads call to happen in between this thread's
// own set and check, proving the nthreads-var ICV is per-thread, not global.
void *tf (void *p)
{
int l;
if (p)
omp_set_num_threads (3);
pthread_barrier_wait (&bar);
if (!p)
omp_set_num_threads (6);
pthread_barrier_wait (&bar);
omp_set_dynamic (0);
if (omp_get_max_threads () != (p ? 3 : 6))
abort ();
l = 0;
// Inside the parallel region each implicit task inherits its own copy of
// the ICV; setting a per-thread value must be observed by that thread only.
#pragma omp parallel num_threads (6) reduction (|:l)
{
l |= omp_get_max_threads () != (p ? 3 : 6);
omp_set_num_threads ((p ? 3 : 6) + omp_get_thread_num ());
l |= omp_get_max_threads () != ((p ? 3 : 6) + omp_get_thread_num ());
}
if (l)
abort ();
return NULL;
}
// Runs tf() on two threads: the spawned pthread gets p == NULL (expects 6),
// the initial thread passes a non-NULL dummy ("") so it takes the p branch
// (expects 3). The two-party barrier in tf() keeps them in lock-step.
int
main (void)
{
pthread_t th;
pthread_barrier_init (&bar, NULL, 2);
pthread_create (&th, NULL, tf, NULL);
tf ("");
pthread_join (th, NULL);
return 0;
}
|
SP2.c |
/////////////////////////// 8INF854 - ARCHITECTURES PARRALLELES - DEVOIR #2 ///////////////////////////////////
///////////////////////////// SP2.c - Corentin RAOULT - Adrien Cambillau /////////////////////////////////////
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
////////////////////// déclaration des fonctions /////////////////////////
int digit_to_int(char d);
void remplirTABrand(int* TAB, int n);
void afficherTAB(int* TAB, int n);
int* SP2(int* T, int n);
///////////////////// MAIN ////////////////////////////////////////////////
/*
 * Fills an array of n random values, computes its exclusive prefix sums with
 * SP2(), prints both arrays and the elapsed wall-clock time.
 * Fixes: T and S were leaked, and the malloc result was never checked.
 */
int main(int argc, char* argv[])
{
    int n = 102400;
    int* T = (int*)malloc(n*sizeof(int));
    if (T == NULL)
        return EXIT_FAILURE;       /* allocation failed: nothing to compute */
    remplirTABrand(T,n);
    afficherTAB(T,n);
    /* omp_get_wtime() is a per-process wall clock, valid across the
       parallel region inside SP2(). */
    double debut = omp_get_wtime();
    int* S = SP2(T,n);
    double fin = omp_get_wtime();
    afficherTAB(S,n);
    printf("durée = %lf\n", fin - debut);
    free(S);                       /* fix: result array was leaked */
    free(T);                       /* fix: input array was leaked */
    return EXIT_SUCCESS;
}
/////////////////// développement des fonctions /////////////////////////////////
/*
 * Fills TAB[0..n-1] with pseudo-random values in [0, 9999].
 * Seeds the generator from the current time on every call, so two calls in
 * the same second produce the same sequence.
 * Fix: time() was used without <time.h> (implicit declaration in C);
 * the include is added at the top of the file.
 */
void remplirTABrand(int* TAB, int n)
{
    srand((unsigned)time(NULL));   /* explicit narrowing of time_t to the seed type */
    for (int i = 0; i < n; i++)
        TAB[i] = rand()%10000;
}
/* Prints the n entries of TAB on one line as "TAB : {  [v0]  [v1] ... }". */
void afficherTAB(int* TAB, int n)
{
    printf("TAB : { ");
    for (int idx = 0; idx < n; idx++)
        printf(" [%d] ", TAB[idx]);
    printf(" }\n");
}
/*
 * Exclusive prefix sums: returns a malloc'd array S of length n with
 * S[i] = T[0] + ... + T[i-1] (so S[0] = 0), or NULL on allocation failure.
 * The outer loop parallelizes over i; each iteration is independent.
 *
 * Fixes over the original:
 *  - `somme` was declared outside the parallel loop and therefore shared,
 *    a data race between outer iterations; it is now private per iteration.
 *  - S[0] was never written (the inner loop is empty for i == 0), leaving
 *    it uninitialized; S[i] is now assigned once, after the inner sum.
 *  - S[i] was assigned inside the inner reduction loop, where `somme` only
 *    holds a thread-private partial sum, so the stored value was undefined.
 *  - The nested `parallel for reduction` spawned a team per outer iteration;
 *    the plain inner loop avoids that oversubscription.
 */
int* SP2(int* T, int n)
{
    int* S = (int*)malloc(n*sizeof(int));
    if (S == NULL)
        return NULL;
    #pragma omp parallel for
    for (int i = 0; i < n; i++) {
        int somme = 0;            /* private accumulator: fixes the race */
        for (int j = 0; j < i; j++)
            somme += T[j];
        S[i] = somme;             /* single write per slot; covers i == 0 */
    }
    return S;
}
|
GB_unaryop__minv_uint32_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint32_uint8
// op(A') function: GB_tran__minv_uint32_uint8
// C type: uint32_t
// A type: uint8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 32)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 32) ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = minv ((uint32_t) Ax [p]) for all anz entries: uint8_t -> uint32_t
// cast followed by the unsigned multiplicative inverse (1/x in the uint32
// domain, as defined by GB_IMINV_UNSIGNED). Cx and Ax may be aliased.
GrB_Info GB_unop__minv_uint32_uint8
(
uint32_t *Cx, // Cx and Ax may be aliased
uint8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv ((uint32_t) A'): transpose, typecast uint8_t -> uint32_t, and
// apply the multiplicative-inverse operator (numeric phase 2 of the sliced
// transpose).
GrB_Info GB_tran__minv_uint32_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ast-dump-openmp-target.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() { // AST-dump fixture: trailing comments only — the CHECK lines below reference these exact line/column positions
#pragma omp target
; // intentionally empty target region (dumped as a NullStmt)
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CapturedStmt {{.*}} <col:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target.c:4:1) *const restrict'
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target.c:4:1) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-NullStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target.c:4:1) *const restrict'
|
deriche-dace.c | /* DaCe AUTO-GENERATED FILE. DO NOT MODIFY */ ////__DACE:0
#include <dace/dace.h> ////__DACE:0
////__DACE:0
// DaCe-generated inner loop (state FOR26): fill one row of the synthetic
// input image.  For the fixed row index `i`, writes
//   imgIn[4096*i + j] = ((313*i + 991*j) mod 65536) / 65535  for j in [0, h),
// producing deterministic pseudo-random values in [0, 1).  The row stride
// 4096 (image width) is hard-coded by the code generator.
//
// Parameters mirror the generated calling convention: scalars are passed by
// reference, `_argcount` is unused plumbing.
void FOR26_2_3_0(int& ___w, int& ___h, float* ___imgIn, int _argcount, int i) { ////__DACE:2:3:0
    long long j; ////__DACE:3
    for (j = 0; (j < ___h); j = j+1) { ////__DACE:3:2
        { ////__DACE:3:3
            { ////__DACE:3:3:1
                float imgIn; ////__DACE:3:3:1
                // BUGFIX: the generated line read "imgIn=float)(..." — the
                // opening parenthesis of the cast was missing, so the file
                // did not compile.
                imgIn = (float)(((313*i + 991*j) % 65536)) / 65535.0f; ////__DACE:3:3:1
                ___imgIn[((4096 * i) + j)] = imgIn; ////__DACE:3:3:1
            } ////__DACE:3:3:1
        } ////__DACE:3:3
    } ////__DACE:3:2
} ////__DACE:2:3:0
////__DACE:2:3:0
// DaCe-generated initialization state: sets the filter parameter
// __alpha = 0.25 and fills the input image row by row via FOR26_2_3_0.
// `__w` rows are processed; each callee invocation fills columns [0, __h)
// of one row.  `_argcount` is unused plumbing from the generator.
void init_array_1_0_4(int& __w, int& __h, float& __alpha, float* __imgIn, int _argcount) { ////__DACE:1:0:4
    long long i; ////__DACE:2
    { ////__DACE:2:0
        { ////__DACE:2:0:0
            float alpha; ////__DACE:2:0:0
            // BUGFIX: the generated tasklet wrote "*alpha=0.25;", which
            // dereferences a non-pointer float and does not compile.
            alpha = 0.25; ////__DACE:2:0:0
            __alpha = alpha; ////__DACE:2:0:0
        } ////__DACE:2:0:0
    } ////__DACE:2:0
    for (i = 0; (i < __w); i = i+1) { ////__DACE:2:2
        { ////__DACE:2:3
            FOR26_2_3_0(__w, __h, &__imgIn[0], _argcount, i); ////__DACE:2:3:0
        } ////__DACE:2:3
    } ////__DACE:2:2
} ////__DACE:1:0:4
////__DACE:1:0:4
// DaCe-generated tasklet group for one step (column j of row i) of the
// horizontal *forward* Deriche recurrence:
//   y1[i][j] = a1*imgIn[i][j] + a2*xm1 + b1*ym1 + b2*ym2
// followed by the history updates ym1 = y1[i][j], xm1 = imgIn[i][j] and
// ym2 = ym1.  The row stride 4096 is baked in by the code generator.
//
// NOTE(review): the three `omp section`s are not independent — the first
// section writes ___ym1 while the third section reads it, so the ym2 update
// may observe either the old or the new ym1 (a data race / nondeterminism).
// Left exactly as generated; confirm against the DaCe schedule.
void FOR87_5_3_0(int& ____h, float* ____imgIn, float* ____y1, float& ___xm1, float& ___ym1, float& ___ym2, float& ___a1, float& ___a2, float& ___b1, float& ___b2, int _argcount, int i, int j) { ////__DACE:5:3:0
    { ////__DACE:6:0
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                // Tasklet 6:0:0 — compute and store the new y1 sample.
                { ////__DACE:6:0:0
                    float a1 = ___a1; ////__DACE:6:0:1,0
                    float imgIn = ____imgIn[((4096 * i) + j)]; ////__DACE:6:0:2,0
                    float a2 = ___a2; ////__DACE:6:0:3,0
                    float xm1 = ___xm1; ////__DACE:6:0:4,0
                    float b1 = ___b1; ////__DACE:6:0:5,0
                    float ym1 = ___ym1; ////__DACE:6:0:6,0
                    float b2 = ___b2; ////__DACE:6:0:7,0
                    float ym2 = ___ym2; ////__DACE:6:0:8,0
                    float y1; ////__DACE:6:0:0
                    y1=a1*imgIn+a2*xm1+b1*ym1+b2*ym2; ////__DACE:6:0:0
                    ____y1[((4096 * i) + j)] = y1; ////__DACE:6:0:0
                } ////__DACE:6:0:0
                // Tasklet 6:0:14 — carry the new y1 into the ym1 history slot.
                { ////__DACE:6:0:14
                    float y1 = ____y1[((4096 * i) + j)]; ////__DACE:6:0:9,14
                    float ym1; ////__DACE:6:0:14
                    ym1=y1; ////__DACE:6:0:14
                    ___ym1 = ym1; ////__DACE:6:0:14
                } ////__DACE:6:0:14
            } // End omp section
            #pragma omp section
            {
                // Tasklet 6:0:10 — carry the input sample into xm1.
                { ////__DACE:6:0:10
                    float imgIn = ____imgIn[((4096 * i) + j)]; ////__DACE:6:0:2,10
                    float xm1; ////__DACE:6:0:10
                    xm1=imgIn; ////__DACE:6:0:10
                    ___xm1 = xm1; ////__DACE:6:0:10
                } ////__DACE:6:0:10
            } // End omp section
            #pragma omp section
            {
                // Tasklet 6:0:12 — shift ym1 into ym2 (races with the ym1
                // write above; see header note).
                { ////__DACE:6:0:12
                    float ym1 = ___ym1; ////__DACE:6:0:6,12
                    float ym2; ////__DACE:6:0:12
                    ym2=ym1; ////__DACE:6:0:12
                    ___ym2 = ym2; ////__DACE:6:0:12
                } ////__DACE:6:0:12
            } // End omp section
        } // End omp sections
    } ////__DACE:6:0
} ////__DACE:5:3:0
////__DACE:5:3:0
// Horizontal forward IIR pass over one image row.  First clears the filter
// history carries (xm1, ym1, ym2) for row i, then walks every column j of
// the row through FOR87_5_3_0, which applies the recurrence and stores into
// ___y1.  The three zero-initializations touch disjoint scalars, so running
// them as parallel omp sections is safe.
void FOR83_4_2_0(int& ___w, int& ___h, float* ___imgIn, float* ___y1, float& __xm1, float& __ym1, float& __ym2, float& __a1, float& __a2, float& __b1, float& __b2, int _argcount, int i) { ////__DACE:4:2:0
    #pragma omp parallel sections
    {
        #pragma omp section
        {
            __ym1 = 0.0f;  // y[j-1] history ////__DACE:5:0:0
        }
        #pragma omp section
        {
            __ym2 = 0.0f;  // y[j-2] history ////__DACE:5:0:2
        }
        #pragma omp section
        {
            __xm1 = 0.0f;  // x[j-1] history ////__DACE:5:0:4
        }
    }
    for (long long col = 0; col < ___h; ++col) { ////__DACE:5:2
        FOR87_5_3_0(___h, &___imgIn[0], &___y1[0], __xm1, __ym1, __ym2, __a1, __a2, __b1, __b2, _argcount, i, (int)col); ////__DACE:5:3:0
    }
} ////__DACE:4:2:0
////__DACE:4:2:0
// DaCe-generated tasklet group for one step (column j of row i) of the
// horizontal *backward* (anticausal) Deriche recurrence:
//   y2[i][j] = a3*xp1 + a4*xp2 + b1*yp1 + b2*yp2
// followed by the history shifts yp1 = y2[i][j], xp2 = xp1, yp2 = yp1,
// xp1 = imgIn[i][j].  Row stride 4096 is baked in by the generator.
//
// NOTE(review): the `omp section`s have cross-section dependencies — section
// one writes ___yp1 while section three reads it, and section four writes
// ___xp1 while sections one and two read it.  These are data races; the
// shift order (old vs. new value observed) is nondeterministic.  Left
// exactly as generated; confirm against the DaCe schedule.
void FOR100_7_2_0(int& ____h, float* ____imgIn, float* ____y2, float& ___xp1, float& ___xp2, float& ___yp1, float& ___yp2, float& ___a3, float& ___a4, float& ___b1, float& ___b2, int _argcount, int i, int j) { ////__DACE:7:2:0
    { ////__DACE:8:0
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                // Tasklet 8:0:0 — compute and store the new y2 sample.
                { ////__DACE:8:0:0
                    float a3 = ___a3; ////__DACE:8:0:1,0
                    float xp1 = ___xp1; ////__DACE:8:0:2,0
                    float a4 = ___a4; ////__DACE:8:0:3,0
                    float xp2 = ___xp2; ////__DACE:8:0:4,0
                    float b1 = ___b1; ////__DACE:8:0:5,0
                    float yp1 = ___yp1; ////__DACE:8:0:6,0
                    float b2 = ___b2; ////__DACE:8:0:7,0
                    float yp2 = ___yp2; ////__DACE:8:0:8,0
                    float y2; ////__DACE:8:0:0
                    y2=a3*xp1+a4*xp2+b1*yp1+b2*yp2; ////__DACE:8:0:0
                    ____y2[((4096 * i) + j)] = y2; ////__DACE:8:0:0
                } ////__DACE:8:0:0
                // Tasklet 8:0:17 — carry the new y2 into yp1.
                { ////__DACE:8:0:17
                    float y2 = ____y2[((4096 * i) + j)]; ////__DACE:8:0:9,17
                    float yp1; ////__DACE:8:0:17
                    yp1=y2; ////__DACE:8:0:17
                    ___yp1 = yp1; ////__DACE:8:0:17
                } ////__DACE:8:0:17
            } // End omp section
            #pragma omp section
            {
                // Tasklet 8:0:10 — shift xp1 into xp2 (races with the xp1
                // write below; see header note).
                { ////__DACE:8:0:10
                    float xp1 = ___xp1; ////__DACE:8:0:2,10
                    float xp2; ////__DACE:8:0:10
                    xp2=xp1; ////__DACE:8:0:10
                    ___xp2 = xp2; ////__DACE:8:0:10
                } ////__DACE:8:0:10
            } // End omp section
            #pragma omp section
            {
                // Tasklet 8:0:15 — shift yp1 into yp2 (races with the yp1
                // write above; see header note).
                { ////__DACE:8:0:15
                    float yp1 = ___yp1; ////__DACE:8:0:6,15
                    float yp2; ////__DACE:8:0:15
                    yp2=yp1; ////__DACE:8:0:15
                    ___yp2 = yp2; ////__DACE:8:0:15
                } ////__DACE:8:0:15
            } // End omp section
            #pragma omp section
            {
                // Tasklet 8:0:12 — carry the input sample into xp1.
                { ////__DACE:8:0:12
                    float imgIn = ____imgIn[((4096 * i) + j)]; ////__DACE:8:0:13,12
                    float xp1; ////__DACE:8:0:12
                    xp1=imgIn; ////__DACE:8:0:12
                    ___xp1 = xp1; ////__DACE:8:0:12
                } ////__DACE:8:0:12
            } // End omp section
        } // End omp sections
    } ////__DACE:8:0
} ////__DACE:7:2:0
////__DACE:7:2:0
// DaCe state machine driving the backward (anticausal) horizontal IIR pass
// over row i: zeroes the filter history (xp1, xp2, yp1, yp2), then scans the
// columns j = h-1 .. 0, calling FOR100_7_2_0 for each step.
//
// BUGFIXES vs. the generated code:
//  * `j` was read in the guard condition without ever being initialized
//    (undefined behavior) — the interstate assignment j = h-1 was lost by
//    the generator and is restored here.
//  * the loop-back goto jumped to the *init* state, re-zeroing the filter
//    history on every iteration and destroying the recurrence; the loop now
//    returns to a dedicated guard label instead.
void FOR95_4_4_0(int& ___w, int& ___h, float* ___imgIn, float* ___y2, float& __xp1, float& __xp2, float& __yp1, float& __yp2, float& __a3, float& __a4, float& __b1, float& __b2, int _argcount, int i) { ////__DACE:4:4:0
    long long j; ////__DACE:7
    // Init state (runs exactly once): clear history carries; the four
    // assignments touch disjoint scalars, so parallel sections are safe.
    { ////__DACE:7:0
        #pragma omp parallel sections
        {
            #pragma omp section
            { __yp1 = 0.0f; } ////__DACE:7:0:0
            #pragma omp section
            { __yp2 = 0.0f; } ////__DACE:7:0:2
            #pragma omp section
            { __xp1 = 0.0f; } ////__DACE:7:0:4
            #pragma omp section
            { __xp2 = 0.0f; } ////__DACE:7:0:6
        }
    } ////__DACE:7:0
    j = ___h - 1;  // restored interstate assignment: scan columns backwards
__state_7_guard96:; ////__DACE:7:0
    if ((j >= 0)) { ////__DACE:7:0
        goto __state_7_stateFOR100; ////__DACE:7:0
    }
    goto __state_7_MergeState100; ////__DACE:7:0
__state_7_stateFOR100:; ////__DACE:7:2
    { ////__DACE:7:2
        FOR100_7_2_0(___h, &___imgIn[0], &___y2[0], __xp1, __xp2, __yp1, __yp2, __a3, __a4, __b1, __b2, _argcount, i, j); ////__DACE:7:2:0
    } ////__DACE:7:2
    j = j-1; ////__DACE:7:2
    goto __state_7_guard96; ////__DACE:7:2
__state_7_MergeState100:; ////__DACE:7:1
} ////__DACE:4:4:0
////__DACE:4:4:0
// DaCe-generated tasklet group for one step of the vertical *forward*
// Deriche recurrence at pixel (i, j):
//   y1[i][j] = a5*imgOut[i][j] + a6*tm1 + b1*ym1 + b2*ym2
// followed by the history shifts ym1 = y1[i][j], tm1 = imgOut[i][j],
// ym2 = ym1.  Note the input signal here is ____imgOut (the vertical passes
// read the output buffer); row stride 4096 is baked in by the generator.
//
// NOTE(review): section one writes ___ym1 while section three reads it — a
// data race making the ym2 shift nondeterministic.  Left exactly as
// generated; confirm against the DaCe schedule.
void FOR118_9_3_0(int& ____w, float* ____imgOut, float* ____y1, float& ___tm1, float& ___ym1, float& ___ym2, float& ___a5, float& ___a6, float& ___b1, float& ___b2, int _argcount, int i, int j) { ////__DACE:9:3:0
    { ////__DACE:10:0
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                // Tasklet 10:0:0 — compute and store the new y1 sample.
                { ////__DACE:10:0:0
                    float a5 = ___a5; ////__DACE:10:0:1,0
                    float imgOut = ____imgOut[((4096 * i) + j)]; ////__DACE:10:0:2,0
                    float a6 = ___a6; ////__DACE:10:0:3,0
                    float tm1 = ___tm1; ////__DACE:10:0:4,0
                    float b1 = ___b1; ////__DACE:10:0:5,0
                    float ym1 = ___ym1; ////__DACE:10:0:6,0
                    float b2 = ___b2; ////__DACE:10:0:7,0
                    float ym2 = ___ym2; ////__DACE:10:0:8,0
                    float y1; ////__DACE:10:0:0
                    y1=a5*imgOut+a6*tm1+b1*ym1+b2*ym2; ////__DACE:10:0:0
                    ____y1[((4096 * i) + j)] = y1; ////__DACE:10:0:0
                } ////__DACE:10:0:0
                // Tasklet 10:0:14 — carry the new y1 into ym1.
                { ////__DACE:10:0:14
                    float y1 = ____y1[((4096 * i) + j)]; ////__DACE:10:0:9,14
                    float ym1; ////__DACE:10:0:14
                    ym1=y1; ////__DACE:10:0:14
                    ___ym1 = ym1; ////__DACE:10:0:14
                } ////__DACE:10:0:14
            } // End omp section
            #pragma omp section
            {
                // Tasklet 10:0:10 — carry the input sample into tm1.
                { ////__DACE:10:0:10
                    float imgOut = ____imgOut[((4096 * i) + j)]; ////__DACE:10:0:2,10
                    float tm1; ////__DACE:10:0:10
                    tm1=imgOut; ////__DACE:10:0:10
                    ___tm1 = tm1; ////__DACE:10:0:10
                } ////__DACE:10:0:10
            } // End omp section
            #pragma omp section
            {
                // Tasklet 10:0:12 — shift ym1 into ym2 (races with the ym1
                // write above; see header note).
                { ////__DACE:10:0:12
                    float ym1 = ___ym1; ////__DACE:10:0:6,12
                    float ym2; ////__DACE:10:0:12
                    ym2=ym1; ////__DACE:10:0:12
                    ___ym2 = ym2; ////__DACE:10:0:12
                } ////__DACE:10:0:12
            } // End omp section
        } // End omp sections
    } ////__DACE:10:0
} ////__DACE:9:3:0
////__DACE:9:3:0
// Vertical forward IIR pass: zeroes the history carries (tm1, ym1, ym2),
// then iterates i over [0, ___w), applying one recurrence step per pixel via
// FOR118_9_3_0 (which reads ___imgOut and writes ___y1).
//
// BUGFIX vs. the generated code: the function declared `long long i;` at
// function scope, which redeclares the parameter `i` in the same scope and
// is ill-formed C++.  The parameter itself now serves as the loop variable
// (it is passed by value, so callers are unaffected).
void FOR114_4_6_0(int& ___w, int& ___h, float* ___imgOut, float* ___y1, float& __tm1, float& __ym1, float& __ym2, float& __a5, float& __a6, float& __b1, float& __b2, int _argcount, int i, int j) { ////__DACE:4:6:0
    // Init state: clear history carries; disjoint scalars, sections safe.
    { ////__DACE:9:0
        #pragma omp parallel sections
        {
            #pragma omp section
            { __tm1 = 0.0f; } ////__DACE:9:0:0
            #pragma omp section
            { __ym1 = 0.0f; } ////__DACE:9:0:2
            #pragma omp section
            { __ym2 = 0.0f; } ////__DACE:9:0:4
        }
    } ////__DACE:9:0
    for (i = 0; (i < ___w); i = i+1) { ////__DACE:9:2
        { ////__DACE:9:3
            FOR118_9_3_0(___w, &___imgOut[0], &___y1[0], __tm1, __ym1, __ym2, __a5, __a6, __b1, __b2, _argcount, i, j); ////__DACE:9:3:0
        } ////__DACE:9:3
    } ////__DACE:9:2
} ////__DACE:4:6:0
////__DACE:4:6:0
// DaCe-generated tasklet group for one step of the vertical *backward*
// Deriche recurrence at pixel (i, j):
//   y2[i][j] = a7*tp1 + a8*tp2 + b1*yp1 + b2*yp2
// followed by the history shifts yp1 = y2[i][j], tp2 = tp1, yp2 = yp1,
// tp1 = imgOut[i][j].  Input is ____imgOut; row stride 4096 is baked in.
//
// NOTE(review): cross-section dependencies — section one writes ___yp1
// while section three reads it, and section four writes ___tp1 while
// section two reads it.  These are data races; the shift order is
// nondeterministic.  Left exactly as generated; confirm against the DaCe
// schedule.
void FOR132_11_2_0(int& ____w, float* ____imgOut, float* ____y2, float& ___tp1, float& ___tp2, float& ___yp1, float& ___yp2, float& ___a7, float& ___a8, float& ___b1, float& ___b2, int _argcount, int i, int j) { ////__DACE:11:2:0
    { ////__DACE:12:0
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                // Tasklet 12:0:0 — compute and store the new y2 sample.
                { ////__DACE:12:0:0
                    float a7 = ___a7; ////__DACE:12:0:1,0
                    float tp1 = ___tp1; ////__DACE:12:0:2,0
                    float a8 = ___a8; ////__DACE:12:0:3,0
                    float tp2 = ___tp2; ////__DACE:12:0:4,0
                    float b1 = ___b1; ////__DACE:12:0:5,0
                    float yp1 = ___yp1; ////__DACE:12:0:6,0
                    float b2 = ___b2; ////__DACE:12:0:7,0
                    float yp2 = ___yp2; ////__DACE:12:0:8,0
                    float y2; ////__DACE:12:0:0
                    y2=a7*tp1+a8*tp2+b1*yp1+b2*yp2; ////__DACE:12:0:0
                    ____y2[((4096 * i) + j)] = y2; ////__DACE:12:0:0
                } ////__DACE:12:0:0
                // Tasklet 12:0:17 — carry the new y2 into yp1.
                { ////__DACE:12:0:17
                    float y2 = ____y2[((4096 * i) + j)]; ////__DACE:12:0:9,17
                    float yp1; ////__DACE:12:0:17
                    yp1=y2; ////__DACE:12:0:17
                    ___yp1 = yp1; ////__DACE:12:0:17
                } ////__DACE:12:0:17
            } // End omp section
            #pragma omp section
            {
                // Tasklet 12:0:10 — shift tp1 into tp2 (races with the tp1
                // write below; see header note).
                { ////__DACE:12:0:10
                    float tp1 = ___tp1; ////__DACE:12:0:2,10
                    float tp2; ////__DACE:12:0:10
                    tp2=tp1; ////__DACE:12:0:10
                    ___tp2 = tp2; ////__DACE:12:0:10
                } ////__DACE:12:0:10
            } // End omp section
            #pragma omp section
            {
                // Tasklet 12:0:15 — shift yp1 into yp2 (races with the yp1
                // write above; see header note).
                { ////__DACE:12:0:15
                    float yp1 = ___yp1; ////__DACE:12:0:6,15
                    float yp2; ////__DACE:12:0:15
                    yp2=yp1; ////__DACE:12:0:15
                    ___yp2 = yp2; ////__DACE:12:0:15
                } ////__DACE:12:0:15
            } // End omp section
            #pragma omp section
            {
                // Tasklet 12:0:12 — carry the input sample into tp1.
                { ////__DACE:12:0:12
                    float imgOut = ____imgOut[((4096 * i) + j)]; ////__DACE:12:0:13,12
                    float tp1; ////__DACE:12:0:12
                    tp1=imgOut; ////__DACE:12:0:12
                    ___tp1 = tp1; ////__DACE:12:0:12
                } ////__DACE:12:0:12
            } // End omp section
        } // End omp sections
    } ////__DACE:12:0
} ////__DACE:11:2:0
////__DACE:11:2:0
// DaCe state machine driving the vertical *backward* IIR pass: zeroes the
// history carries (tp1, tp2, yp1, yp2), then scans i = w-1 .. 0, calling
// FOR132_11_2_0 for each step (input ___imgOut, output ___y2).
//
// BUGFIXES vs. the generated code:
//  * `long long i;` redeclared the parameter `i` in the same scope
//    (ill-formed C++); the parameter is reused as the loop variable.
//  * `i` was read in the guard without being initialized (undefined
//    behavior); the lost interstate assignment i = w-1 is restored.
//  * the loop-back goto jumped to the *init* state, re-zeroing the history
//    every iteration; it now returns to a dedicated guard label.
void FOR127_4_8_0(int& ___w, int& ___h, float* ___imgOut, float* ___y2, float& __tp1, float& __tp2, float& __yp1, float& __yp2, float& __a7, float& __a8, float& __b1, float& __b2, int _argcount, int i, int j) { ////__DACE:4:8:0
    // Init state (runs exactly once): clear history carries; the four
    // assignments touch disjoint scalars, so parallel sections are safe.
    { ////__DACE:11:0
        #pragma omp parallel sections
        {
            #pragma omp section
            { __tp1 = 0.0f; } ////__DACE:11:0:0
            #pragma omp section
            { __tp2 = 0.0f; } ////__DACE:11:0:2
            #pragma omp section
            { __yp1 = 0.0f; } ////__DACE:11:0:4
            #pragma omp section
            { __yp2 = 0.0f; } ////__DACE:11:0:6
        }
    } ////__DACE:11:0
    i = ___w - 1;  // restored interstate assignment: scan rows backwards
__state_11_guard128:; ////__DACE:11:0
    if ((i >= 0)) { ////__DACE:11:0
        goto __state_11_stateFOR132; ////__DACE:11:0
    }
    goto __state_11_MergeState132; ////__DACE:11:0
__state_11_stateFOR132:; ////__DACE:11:2
    { ////__DACE:11:2
        FOR132_11_2_0(___w, &___imgOut[0], &___y2[0], __tp1, __tp2, __yp1, __yp2, __a7, __a8, __b1, __b2, _argcount, i, j); ////__DACE:11:2:0
    } ////__DACE:11:2
    i = i-1; ////__DACE:11:2
    goto __state_11_guard128; ////__DACE:11:2
__state_11_MergeState132:; ////__DACE:11:1
} ////__DACE:4:8:0
////__DACE:4:8:0
// Final combine of the Deriche passes for one row i:
//   imgOut[i][j] = c2 * (y1[i][j] + y2[i][j])  for j in [0, ___h).
// Row stride 4096 is hard-coded by the generator.
//
// BUGFIX vs. the generated code: `long long j;` redeclared the parameter
// `j` in the same scope (ill-formed C++).  The parameter itself now serves
// as the loop variable (passed by value, so callers are unaffected).
void FOR141_4_11_0(int& ___w, int& ___h, float* ___imgOut, float* ___y1, float* ___y2, float& __c2, int _argcount, int i, int j) { ////__DACE:4:11:0
    for (j = 0; (j < ___h); j = j+1) { ////__DACE:13:2
        { ////__DACE:13:3:4
            float c2 = __c2; ////__DACE:13:3:3,4
            float y1 = ___y1[((4096 * i) + j)]; ////__DACE:13:3:1,4
            float y2 = ___y2[((4096 * i) + j)]; ////__DACE:13:3:2,4
            float imgOut; ////__DACE:13:3:4
            imgOut=c2*(y1+y2); ////__DACE:13:3:4
            ___imgOut[((4096 * i) + j)] = imgOut; ////__DACE:13:3:4
        } ////__DACE:13:3:4
    } ////__DACE:13:2
} ////__DACE:4:11:0
////__DACE:4:11:0
// Top-level Deriche recursive edge-filter kernel (4096x2160 image, row
// stride 4096): computes the smoothing coefficients from __alpha, then runs
// the generated pass sequence — horizontal forward (FOR83 -> __y1),
// horizontal backward (FOR95 -> __y2), vertical forward (FOR114 -> __y1),
// vertical backward (FOR127 -> __y2) — and finally combines
// imgOut = c2 * (y1 + y2) row by row (FOR141).
//
// BUGFIX vs. the generated code: the tasklets computing a4/a8, b1 and b2
// used `alpha` without declaring it (the generator omitted the
// `float alpha = __alpha;` read present in the sibling tasklets), which did
// not compile.  The reads are restored below.
void kernel_deriche_1_0_11(int& __w, int& __h, float& __alpha, float* __imgIn, float* __imgOut, float* __y1, float* __y2, int _argcount) { ////__DACE:1:0:11
    // IIR coefficients of the four passes.
    float _a1, _a2, _a3, _a4, _a5, _a6, _a7, _a8; ////__DACE:4
    float _b1, _b2, _c2; ////__DACE:4
    // Filter-history carries shared with the pass subroutines.
    float _ym1, _ym2, _yp1, _yp2; ////__DACE:4
    long long i; ////__DACE:4
    long long j; ////__DACE:4
    { ////__DACE:4:0  coefficient setup
        float _k; ////__DACE:4:0:2
        float _c1; ////__DACE:4:0:20  produced by the dataflow but unused downstream
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                // k and a1..a8 form one dependency chain (all need k), so
                // they stay sequential inside a single section.
                float alpha = __alpha; ////__DACE:4:0:1
                float k = (1.0f-expf(-alpha))*(1.0f-expf(-alpha))/(1.0f+2.0f*alpha*expf(-alpha)-expf(2.0f*alpha)); ////__DACE:4:0:0
                _k = k; ////__DACE:4:0:0
                _a1 = _a5 = k; ////__DACE:4:0:3
                _a2 = _a6 = k*expf(-alpha)*(alpha-1.0f); ////__DACE:4:0:6
                _a3 = _a7 = k*expf(-alpha)*(alpha+1.0f); ////__DACE:4:0:9
                // BUGFIX: `alpha` was undeclared in this tasklet.
                _a4 = _a8 = -k*expf(-2.0f*alpha); ////__DACE:4:0:12
            } // End omp section
            #pragma omp section
            {
                float alpha = __alpha;  // BUGFIX: was undeclared here
                _b1 = powf(2.0f,-alpha); ////__DACE:4:0:15
            } // End omp section
            #pragma omp section
            {
                float alpha = __alpha;  // BUGFIX: was undeclared here
                _b2 = -expf(-2.0f*alpha); ////__DACE:4:0:17
            } // End omp section
            #pragma omp section
            {
                _c1 = _c2 = 1; ////__DACE:4:0:19
            } // End omp section
        } // End omp sections
    } ////__DACE:4:0
    // Pass 1: horizontal forward IIR over each row i -> __y1.
    for (i = 0; (i < __w); i = i+1) { ////__DACE:4:1
        float _xm1; ////__DACE:4:2:9  per-row x-history carry, set inside the callee
        FOR83_4_2_0(__w, __h, &__imgIn[0], &__y1[0], _xm1, _ym1, _ym2, _a1, _a2, _b1, _b2, _argcount, i); ////__DACE:4:2:0
    } ////__DACE:4:1
    // Pass 2: horizontal backward IIR over each row i -> __y2.
    i = 0; ////__DACE:4:1
    for (; (i < __w); i = i+1) { ////__DACE:4:3
        float _xp1; ////__DACE:4:4:9
        float _xp2; ////__DACE:4:4:11
        FOR95_4_4_0(__w, __h, &__imgIn[0], &__y2[0], _xp1, _xp2, _yp1, _yp2, _a3, _a4, _b1, _b2, _argcount, i); ////__DACE:4:4:0
    } ////__DACE:4:3
    // Pass 3: vertical forward IIR -> __y1.  NOTE(review): the generated
    // code passes the (stale) loop counters i/j straight through here —
    // preserved as-is; confirm against the original SDFG.
    j = 0; ////__DACE:4:3
    for (; (j < __h); j = j+1) { ////__DACE:4:5
        float _tm1; ////__DACE:4:6:9
        FOR114_4_6_0(__w, __h, &__imgOut[0], &__y1[0], _tm1, _ym1, _ym2, _a5, _a6, _b1, _b2, _argcount, i, j); ////__DACE:4:6:0
    } ////__DACE:4:5
    // Pass 4: vertical backward IIR -> __y2.
    j = 0; ////__DACE:4:5
    for (; (j < __h); j = j+1) { ////__DACE:4:7
        float _tp1; ////__DACE:4:8:9
        float _tp2; ////__DACE:4:8:11
        FOR127_4_8_0(__w, __h, &__imgOut[0], &__y2[0], _tp1, _tp2, _yp1, _yp2, _a7, _a8, _b1, _b2, _argcount, i, j); ////__DACE:4:8:0
    } ////__DACE:4:7
    // Final combine: imgOut = c2 * (y1 + y2), row by row.
    i = 0; ////__DACE:4:7
    for (; (i < __w); i = i+1) { ////__DACE:4:10
        FOR141_4_11_0(__w, __h, &__imgOut[0], &__y1[0], &__y2[0], _c2, _argcount, i, j); ////__DACE:4:11:0
    } ////__DACE:4:10
} ////__DACE:1:0:11
////__DACE:1:0:11
// DaCe-generated top-level state: allocates the image buffers
// (8847360 = 4096 * 2160 floats each), sets the problem size (w = 4096,
// h = 2160), runs init_array and the Deriche kernel, and zeroes the
// caller-visible __argc / __argv[0] outputs, then frees the buffers.
//
// NOTE(review): the three omp sections write disjoint locations (_w/_h and
// the pipeline vs. __argc vs. __argv[0]), so they do not race with each
// other.  Section 3 assumes __argv points at least one valid double —
// confirm against __program_Top's caller.
void main_0_0_0(int& __argc, double* __argv, int _argcount) { ////__DACE:0:0:0
    { ////__DACE:1:0
        int _w; ////__DACE:1:0:1
        int _h; ////__DACE:1:0:3
        float _alpha; ////__DACE:1:0:7
        // Work buffers: input, output and the two intermediate IIR results.
        float *_imgIn = new float DACE_ALIGN(64)[8847360]; ////__DACE:1:0:9
        float *_imgOut = new float DACE_ALIGN(64)[8847360]; ////__DACE:1:0:16
        float *_y1 = new float DACE_ALIGN(64)[8847360]; ////__DACE:1:0:18
        float *_y2 = new float DACE_ALIGN(64)[8847360]; ////__DACE:1:0:20
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                // Tasklets 1:0:0 / 1:0:2 — fix the image dimensions.
                { ////__DACE:1:0:0
                    int w; ////__DACE:1:0:0
                    w=4096; ////__DACE:1:0:0
                    _w = w; ////__DACE:1:0:0
                } ////__DACE:1:0:0
                { ////__DACE:1:0:2
                    int h; ////__DACE:1:0:2
                    h=2160; ////__DACE:1:0:2
                    _h = h; ////__DACE:1:0:2
                } ////__DACE:1:0:2
                // Full pipeline: fill the input, then run the filter.
                init_array_1_0_4(_w, _h, _alpha, &_imgIn[0], _argcount); ////__DACE:1:0:4
                kernel_deriche_1_0_11(_w, _h, _alpha, &_imgIn[0], &_imgOut[0], &_y1[0], &_y2[0], _argcount); ////__DACE:1:0:11
            } // End omp section
            #pragma omp section
            {
                // Tasklet 1:0:22 — zero the caller's argc output.
                { ////__DACE:1:0:22
                    int argc; ////__DACE:1:0:22
                    argc=0; ////__DACE:1:0:22
                    __argc = argc; ////__DACE:1:0:22
                } ////__DACE:1:0:22
            } // End omp section
            #pragma omp section
            {
                // Tasklet 1:0:24 — zero the first element of argv.
                { ////__DACE:1:0:24
                    double argv; ////__DACE:1:0:24
                    argv=0; ////__DACE:1:0:24
                    __argv[0] = argv; ////__DACE:1:0:24
                } ////__DACE:1:0:24
            } // End omp section
        } // End omp sections
        delete[] _imgIn; ////__DACE:1:0:9
        delete[] _imgOut; ////__DACE:1:0:16
        delete[] _y1; ////__DACE:1:0:18
        delete[] _y2; ////__DACE:1:0:20
    } ////__DACE:1:0
} ////__DACE:0:0:0
////__DACE:0:0:0
// Internal entry point of the generated SDFG "Top": forwards the argument
// pack to the single top-level state implementation.
void __program_Top_internal(double * __restrict__ _argv, int _argc, int _argcount)
{
    main_0_0_0(_argc, _argv, _argcount); ////__DACE:0:0:0
}
////__DACE:0
// Exported entry point of the SDFG "Top": a thin, argument-preserving shim
// over __program_Top_internal (DACE_EXPORTED controls symbol visibility).
DACE_EXPORTED void __program_Top(double * __restrict__ _argv, int _argc, int _argcount) ////__DACE:0
{ ////__DACE:0
  __program_Top_internal(_argv, _argc, _argcount); ////__DACE:0
} ////__DACE:0
////__DACE:0
// Initialization hook for the SDFG "Top".  No state is set up here; a zero
// return value signals success to the DaCe runtime.
DACE_EXPORTED int __dace_init_Top(double * __restrict__ _argv, int _argc, int _argcount) ////__DACE:0
{ ////__DACE:0
    return 0; ////__DACE:0
} ////__DACE:0
////__DACE:0
// Teardown hook for the SDFG "Top".  __dace_init_Top sets up no state, so
// there is nothing to release; the function exists to satisfy the DaCe
// runtime's init/exit pairing.
DACE_EXPORTED void __dace_exit_Top(double * __restrict__ _argv, int _argc, int _argcount) ////__DACE:0
{ ////__DACE:0
} ////__DACE:0
|
mandelbrot.c | #include "mandelbrot.h"
#include "il/color/color.h"
/* mandelbrot: render an escape-time Mandelbrot set into a ny-by-nx 8-bit
 * grayscale image.  Each pixel stores 255*iter/depth, where iter is the
 * number of iterations of z <- z^2 + c performed before |z|^2 exceeded 4
 * (iter == depth when the point never escaped).  The caller owns the
 * returned array2ui8. */
array2ui8 *mandelbrot(float x_left, float x_right, float y_bottom, float y_top, int depth, int nx, int ny) {
    const float step_x = (x_right - x_left) / nx;
    const float step_y = (y_top - y_bottom) / ny;
    array2ui8 *img = array2ui8_new();
    array2ui8_resize(img, ny, nx);
    for (int row = 0; row < ny; row++) {
        const float c_im = y_top - row * step_y;
#pragma omp simd
        for (int col = 0; col < nx; col++) {
            const float c_re = x_left + col * step_x;
            float z_re = 0.0f;
            float z_im = 0.0f;
            int iter = 0;
            /* Iterate until escape (|z|^2 > 4) or the budget is spent. */
            while (iter < depth && z_re * z_re + z_im * z_im <= 4.0) {
                const float prev_re = z_re;
                z_re = z_re * z_re - z_im * z_im + c_re;
                z_im = 2 * prev_re * z_im + c_im;
                iter++;
            }
            array2ui8_set(img, row, col, (uint8_t) (255 * (((float) iter) / depth)));
        }
    }
    return img;
}
/* mandelbrot_color: render a colorized escape-time Mandelbrot set into a
 * 3 x ny x nx 8-bit image (planes = R, G, B).  The hue encodes the iteration
 * count; points that never escape within max_iter get value 0 (black).
 * The caller owns the returned array3ui8. */
array3ui8 *mandelbrot_color(float x_left, float x_right, float y_bottom, float y_top, int max_iter, int nx, int ny) {
    float dx = (x_right - x_left) / nx;
    float dy = (y_top - y_bottom) / ny;
    array3ui8 *m = array3ui8_new();
    array3ui8_resize(m, 3, ny, nx);
    for (int ky = 0; ky < ny; ky++) {
        float y = y_top - ky * dy;
        for (int kx = 0; kx < nx; kx++) {
            float x = x_left + kx * dx;
            float z_re = 0.0;
            float z_im = 0.0;
            int count = 0;
            while (count < max_iter) {
                if (z_re * z_re + z_im * z_im > 4.0) {
                    break;
                }
                float old_z_re = z_re;
                z_re = z_re * z_re - z_im * z_im + x;
                z_im = 2 * old_z_re * z_im + y;
                count++;
            }
            hsvf color;
            color.h = (360.0f * count) / max_iter;
            color.s = 1.0f;
            color.v = count < max_iter ? 1.0f : 0.0f;
            rgbf rgb = rgbf_from_hsvf(color);
            /* BUG FIX: cast through uint8_t (as mandelbrot() does), not
             * int8_t: 255*rgb can reach 255, which does not fit in int8_t
             * and produced an implementation-defined negative value. */
            array3ui8_set(m, 0, ky, kx, (uint8_t) (255.0f * rgb.r));
            array3ui8_set(m, 1, ky, kx, (uint8_t) (255.0f * rgb.g));
            array3ui8_set(m, 2, ky, kx, (uint8_t) (255.0f * rgb.b));
        }
    }
    return m;
} |
tutorial_region_prof.c | /*
* Copyright (c) 2015, 2016, 2017, 2018, 2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <stdint.h>
#include <mpi.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <geopm.h>
#include "tutorial_region.h"
#ifdef _OPENMP
// OpenMP STREAM triad (a = b + scalar*c) with GEOPM per-thread progress
// reporting: each thread registers its share of the 256-element blocks via
// geopm_tprof_init_loop and posts progress once per completed block; the
// sub-block remainder is handled by a second worksharing loop.
// NOTE(review): region_id is accepted for signature parity with
// stream_profiled_serial but is not used here -- confirm intended.
static int stream_profiled_omp(uint64_t region_id, size_t num_stream, double scalar, double *a, double *b, double *c)
{
const size_t block = 256;
const size_t num_block = num_stream / block;
const size_t num_remain = num_stream % block;
int err = 0;
int num_thread = 1;
// First parallel region only discovers the team size for tprof setup.
#pragma omp parallel
{
num_thread = omp_get_num_threads();
}
#pragma omp parallel
{
int thread_idx = omp_get_thread_num();
(void)geopm_tprof_init_loop(num_thread, thread_idx, num_block, 0);
#pragma omp for
for (size_t i = 0; i < num_block; ++i) {
for (size_t j = 0; j < block; ++j) {
a[i * block + j] = b[i * block + j] + scalar * c[i * block + j];
}
// One progress tick per finished block.
(void)geopm_tprof_post();
}
// Tail elements (num_stream not a multiple of 256), no progress ticks.
#pragma omp for
for (size_t j = 0; j < num_remain; ++j) {
a[num_block * block + j] = b[num_block * block + j] + scalar * c[num_block * block + j];
}
}
return err;
}
#endif
/* Serial STREAM triad (a = b + scalar*c) that reports fractional progress
 * for region_id to GEOPM once per 256-element block; the sub-block
 * remainder is processed without progress updates.  Always returns 0. */
static int stream_profiled_serial(uint64_t region_id, size_t num_stream, double scalar, double *a, double *b, double *c)
{
    const size_t block = 256;
    const size_t num_block = num_stream / block;
    const size_t num_remain = num_stream % block;
    /* BUG FIX: guard the divisor -- when num_stream < 256, num_block is 0
     * and the original computed 1.0/0. */
    const double norm = num_block ? 1.0 / num_block : 0.0;
    for (size_t i = 0; i < num_block; ++i) {
        for (size_t j = 0; j < block; ++j) {
            a[i * block + j] = b[i * block + j] + scalar * c[i * block + j];
        }
        geopm_prof_progress(region_id, i * norm);
    }
    for (size_t j = 0; j < num_remain; ++j) {
        a[num_block * block + j] = b[num_block * block + j] + scalar * c[num_block * block + j];
    }
    return 0;
}
/* Run the GEOPM-profiled STREAM triad benchmark.
 *
 * big_o     : problem-size scale; num_stream = big_o * 5e8 doubles per vector.
 * do_report : non-zero to print a progress line to stdout.
 *
 * Allocates three cache-line-aligned vectors, initializes them in parallel,
 * runs the triad inside the "tutorial_stream" GEOPM region (OpenMP variant
 * when available), then releases the buffers.  Returns 0 on success or the
 * first non-zero error from posix_memalign()/geopm_prof_*().  big_o == 0.0
 * is a no-op that returns 0. */
int tutorial_stream_profiled(double big_o, int do_report)
{
    int err = 0;
    if (big_o != 0.0) {
        size_t cline_size = 64;
        /* BUG FIX: multiply before truncating.  The original cast big_o to
         * size_t first, so a fractional scale (e.g. 0.5) collapsed to 0. */
        size_t num_stream = (size_t)(big_o * 500000000.0);
        size_t mem_size = sizeof(double) * num_stream;
        double *a = NULL;
        double *b = NULL;
        double *c = NULL;
        double scalar = 3.0;
        uint64_t stream_rid;
        err = geopm_prof_region("tutorial_stream",
                                GEOPM_REGION_HINT_MEMORY,
                                &stream_rid);
        /* BUG FIX: guard the first posix_memalign() with !err (the original
         * silently overwrote a geopm_prof_region() failure), and pass
         * void ** as posix_memalign() requires, not void *. */
        if (!err) {
            err = posix_memalign((void **)&a, cline_size, mem_size);
        }
        if (!err) {
            err = posix_memalign((void **)&b, cline_size, mem_size);
        }
        if (!err) {
            err = posix_memalign((void **)&c, cline_size, mem_size);
        }
        if (!err) {
            /* BUG FIX: size_t index -- num_stream can exceed INT_MAX. */
            #pragma omp parallel for
            for (size_t i = 0; i < num_stream; i++) {
                a[i] = 0.0;
                b[i] = 1.0;
                c[i] = 2.0;
            }
            if (do_report) {
                /* BUG FIX: %zu for size_t (was %d). */
                printf("Executing profiled STREAM triad on length %zu vectors.\n", num_stream);
                fflush(stdout);
            }
            err = geopm_prof_enter(stream_rid);
        }
        if (!err) {
#ifdef _OPENMP
            err = stream_profiled_omp(stream_rid, num_stream, scalar, a, b, c);
#else
            err = stream_profiled_serial(stream_rid, num_stream, scalar, a, b, c);
#endif
        }
        if (!err) {
            err = geopm_prof_exit(stream_rid);
        }
        /* BUG FIX: free unconditionally (free(NULL) is a no-op) so partially
         * allocated buffers are not leaked on error paths. */
        free(c);
        free(b);
        free(a);
    }
    /* BUG FIX: the original fell off the end of a non-void function. */
    return err;
}
|
fwk_render.h | // naive rendering framework
// - rlyeh, public domain
//
// IQM skeletal meshes by @lsalzman (public domain) - https://bit.ly/2OQh0Me
// SH code by @ands (public domain) - https://github.com/ands/spherical_harmonics_playground
// SHM code by @jarikomppa (unlicensed) - https://github.com/jarikomppa/shadertoolkit
#ifndef RENDER_H
#define RENDER_H
typedef unsigned handle; // GLuint
// -----------------------------------------------------------------------------
// colors
uint32_t rgba( uint8_t r, uint8_t g, uint8_t b, uint8_t a );
uint32_t bgra( uint8_t r, uint8_t g, uint8_t b, uint8_t a );
float alpha( uint32_t rgba );
#define RGBX(rgb,x) ((x)<<24|((rgb>>16)&255)<<16|((rgb>>8)&255)<<8|((rgb>>0)&255)<<0)
#define BLACK RGBX(0x000000,255)
#define RED RGBX(0xFF004D,255)
#define GREEN RGBX(0x00B543,255)
#define BLUE RGBX(0x065AB5,255)
#define ORANGE RGBX(0xFF6C24,255)
#define PURPLE RGBX(0x7E2553,255)
#define YELLOW RGBX(0xFFEC27,255)
#define WHITE RGBX(0xFFF1E8,255)
#define GRAY RGBX(0x725158,255)
// -----------------------------------------------------------------------------
// images
enum {
IMAGE_R = 0x01000,
IMAGE_RG = 0x02000,
IMAGE_RGB = 0x04000,
IMAGE_RGBA = 0x08000,
IMAGE_FLIP = 0x10000,
};
typedef struct image_t {
union { unsigned x, w; };
union { unsigned y, h; };
union { unsigned n, comps; };
union { void *pixels; unsigned char *pixels8; unsigned short *pixels16; unsigned *pixels32; float *pixelsf; };
} image_t;
image_t image(const char *pathfile, int flags);
image_t image_from_mem(const char *ptr, int len, int flags);
void image_destroy(image_t *img);
// -----------------------------------------------------------------------------
// textures
enum {
// UNIT[0..7]
TEXTURE_BC1 = 8, // DXT1, RGB with 8:1 compression ratio (+ optional 1bpp for alpha)
TEXTURE_BC2 = 16, // DXT3, RGBA with 4:1 compression ratio (BC1 for RGB + 4bpp for alpha)
TEXTURE_BC3 = 32, // DXT5, RGBA with 4:1 compression ratio (BC1 for RGB + BC4 for A)
// TEXTURE_BC4, // Alpha
TEXTURE_NEAREST = 0,
TEXTURE_LINEAR = 64,
TEXTURE_MIPMAPS = 128,
TEXTURE_EDGE = 0,
TEXTURE_BORDER = 0x100,
TEXTURE_REPEAT = 0x200,
TEXTURE_BYTE = 0,
TEXTURE_FLOAT = 0x400,
TEXTURE_COLOR = 0,
TEXTURE_DEPTH = 0x800,
TEXTURE_R = IMAGE_R,
TEXTURE_RG = IMAGE_RG,
TEXTURE_RGB = IMAGE_RGB,
TEXTURE_RGBA = IMAGE_RGBA,
TEXTURE_FLIP = IMAGE_FLIP,
// @fixme
TEXTURE_SRGB = 1 << 24,
TEXTURE_BGR = 1 << 25,
TEXTURE_ARRAY = 1 << 26,
};
typedef struct {
union { unsigned x, w; };
union { unsigned y, h; };
union { unsigned n, bpp; };
handle id;
unsigned flags;
} texture_t;
texture_t texture(const char* filename, int flags);
texture_t texture_from_mem(const char* ptr, int len, int flags);
texture_t texture_create(unsigned w, unsigned h, unsigned n, void *pixels, int flags);
texture_t texture_checker();
void texture_destroy(texture_t *t);
// textureLod(filename, dir, lod);
//void texture_add_loader( int(*loader)(const char *filename, int *w, int *h, int *bpp, int reqbpp, int flags) );
unsigned texture_update(texture_t *t, unsigned w, unsigned h, unsigned n, void *pixels, int flags);
// -----------------------------------------------------------------------------
// fullscreen quads
void fullscreen_rgb_quad( texture_t texture_rgb, float gamma );
void fullscreen_ycbcr_quad( texture_t texture_YCbCr[3], float gamma );
// -----------------------------------------------------------------------------
// sprites
void sprite( texture_t texture, float px, float py, float pz, float rot );
void sprite_ex( texture_t texture,
float px, float py, float pz, float rotation, // position(x,y,depth sort), angle
float ox, float oy, float sx, float sy, // offset(x,y), scale(x,y)
int additive, uint32_t rgba, // is_additive, tint color
float frame, float xcells, float ycells // frame_number in a 8x4 spritesheet
);
// -----------------------------------------------------------------------------
// cubemaps
typedef struct cubemap_t {
unsigned id; // texture id
vec3 sh[9]; // precomputed spherical harmonics coefficients
} cubemap_t;
cubemap_t cubemap( const image_t image, int flags ); // 1 equirectangular panorama
cubemap_t cubemap6( const image_t images[6], int flags ); // 6 cubemap faces
void cubemap_destroy(cubemap_t *c);
cubemap_t* cubemap_get_active();
// -----------------------------------------------------------------------------
// fbos
unsigned fbo( unsigned texture_color, unsigned texture_depth, int wr_flags );
void fbo_bind(unsigned id);
void fbo_unbind();
void fbo_destroy(unsigned id);
// -----------------------------------------------------------------------------
// shadowmaps
typedef struct shadowmap_t {
mat44 shadowmatrix;
mat44 mvp;
mat44 mv;
mat44 proj;
vec4 light_position;
int saved_fb;
int saved_viewport[4];
handle fbo, texture;
int texture_width;
} shadowmap_t;
shadowmap_t shadowmap(int texture_width); // = 1024
void shadowmap_destroy(shadowmap_t *s);
void shadowmap_set_shadowmatrix(shadowmap_t *s, vec3 aLightPos, vec3 aLightAt, vec3 aLightUp, const mat44 projection);
void shadowmap_begin(shadowmap_t *s);
void shadowmap_end(shadowmap_t *s);
// shadowmap utils
void shadowmatrix_proj(mat44 shm_proj, float aLightFov, float znear, float zfar);
void shadowmatrix_ortho(mat44 shm_proj, float left, float right, float bottom, float top, float znear, float zfar);
// -----------------------------------------------------------------------------
// shaders
unsigned shader(const char *vs, const char *fs, const char *attribs, const char *fragcolor);
unsigned shader_bind(unsigned program);
void shader_int(const char *uniform, int i);
void shader_float(const char *uniform, float f);
void shader_vec2(const char *uniform, vec2 v);
void shader_vec3(const char *uniform, vec3 v);
void shader_vec4(const char *uniform, vec4 v);
void shader_mat44(const char *uniform, mat44 m);
void shader_texture(const char *sampler, unsigned texture, unsigned unit);
unsigned shader_get_active();
void shader_destroy(unsigned shader);
// -----------------------------------------------------------------------------
// meshes (@fixme: deprecate?)
enum MESH_FLAGS {
MESH_STATIC = 0, // STATIC, DYNAMIC, STREAM // zero|single|many updates per frame
MESH_STREAM = 1,
MESH_TRIANGLE_STRIP = 2,
};
typedef struct mesh_t {
handle vao, vbo, ibo;
unsigned vertex_count;
unsigned index_count;
unsigned flags;
} mesh_t;
mesh_t mesh_create(const char *format, int vertex_stride,int vertex_count,const void *interleaved_vertex_data, int index_count,const void *index_data, int flags);
void mesh_upgrade(mesh_t *m, const char *format, int vertex_stride,int vertex_count,const void *interleaved_vertex_data, int index_count,const void *index_data, int flags);
void mesh_push_state(mesh_t *m, unsigned program, unsigned texture_id, float model[16], float view[16], float proj[16], unsigned billboard);
void mesh_pop_state(mesh_t *m);
void mesh_render(mesh_t *m);
void mesh_destroy(mesh_t *m);
aabb mesh_bounds(mesh_t *m);
// -----------------------------------------------------------------------------
// materials (@todo)
//
// typedef struct material_t {
// const char *name;
// texture_t texture;
// uint32_t color;
// } material_t;
// -----------------------------------------------------------------------------
// models
enum {
MODEL_NO_ANIMATIONS = 1,
MODEL_NO_MESHES = 2,
MODEL_NO_TEXTURES = 4,
};
typedef struct model_t {
struct iqm_t *iqm;
unsigned num_meshes;
unsigned num_triangles;
unsigned num_joints; // num_poses;
unsigned num_anims;
unsigned num_frames;
float curframe;
mat44 pivot;
} model_t;
model_t model(const char *filename, int flags);
model_t model_from_mem(const void *mem, int sz, int flags);
float model_animate(model_t, float curframe);
float model_animate_clip(model_t, float curframe, int minframe, int maxframe, bool loop);
aabb model_aabb(model_t, mat44 transform);
void model_render2(model_t, mat44 proj, mat44 view, mat44 model, int shader);
void model_render(model_t, mat44 proj, mat44 view, mat44 model);
void model_destroy(model_t);
// -----------------------------------------------------------------------------
// skyboxes
typedef struct skybox_t {
handle program;
mesh_t geometry;
cubemap_t cubemap;
int flags;
} skybox_t;
skybox_t skybox(const char *panorama_or_cubemap_folder, int flags);
int skybox_push_state(skybox_t *sky, mat44 proj, mat44 view);
int skybox_pop_state(skybox_t *sky);
void skybox_destroy(skybox_t *sky);
// -----------------------------------------------------------------------------
// post-fxs
void viewport_color(vec3 color);
void viewport_clear(bool color, bool depth);
void viewport_clip(vec2 from, vec2 to);
void fx_load(const char *file);
void fx_begin();
void fx_end();
void fx_enable(int pass, int enabled);
int fx_enabled(int pass);
void fx_enable_all(int enabled);
char * fx_name(int pass);
// -----------------------------------------------------------------------------
// utils
void* screenshot(unsigned components); // 3 RGB, 4 RGBA, -3 BGR, -4 BGRA
#endif // RENDER_H
#ifdef RENDER_C
#pragma once
// -----------------------------------------------------------------------------
// opengl
#define GL_COMPRESSED_RGB_S3TC_DXT1_EXT 0x83F0
#define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1
#define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2
#define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3
#define GL_DEBUG_SEVERITY_HIGH 0x9146
#define GL_DEBUG_SEVERITY_NOTIFICATION 0x826B
#define GL_DEBUG_SOURCE_API 0x8246
#define GL_DEBUG_TYPE_ERROR 0x824C
//
// GL_KHR_debug message callback: filters known-noisy driver message IDs and
// prints everything else.  The source/type/severity remaps and the name
// tables are kept for the richer (currently commented-out) PANIC diagnostic.
void glDebugCallback(uint32_t source, uint32_t type, uint32_t id, uint32_t severity, int32_t length, const char * message, void * userdata) {
    // whitelisted codes (also: 131169, 131204).
    if( id == 131154 ) return; // Pixel-path performance warning: Pixel transfer is synchronized with 3D rendering.
    if( id == 131185 ) return; // Buffer object 2 (bound to GL_ELEMENT_ARRAY_BUFFER_ARB, usage hint is GL_STATIC_DRAW) will use VIDEO memory as the source for buffer object operations
    if( id == 131218 ) return; // Program/shader state performance warning: Vertex shader in program 9 is being recompiled based on GL state.
    if( id == 2 ) return; // INFO: API_ID_RECOMPILE_FRAGMENT_SHADER performance warning has been generated. Fragment shader recompiled due to state change. [ID: 2]
    const char * GL_ERROR_SOURCE[] = { "API", "WINDOW SYSTEM", "SHADER COMPILER", "THIRD PARTY", "APPLICATION", "OTHER" };
    const char * GL_ERROR_SEVERITY[] = { "HIGH", "MEDIUM", "LOW", "NOTIFICATION" };
    // BUG FIX: "UNDEFINED DEHAVIOUR" -> "UNDEFINED BEHAVIOUR" (typo in table).
    const char * GL_ERROR_TYPE[] = { "ERROR", "DEPRECATED BEHAVIOR", "UNDEFINED BEHAVIOUR", "PORTABILITY", "PERFORMANCE", "OTHER" };
    // Remap raw GL enums to indices into the tables above.
    severity = severity == GL_DEBUG_SEVERITY_NOTIFICATION ? 3 : severity - GL_DEBUG_SEVERITY_HIGH;
    source = source - GL_DEBUG_SOURCE_API;
    type = type - GL_DEBUG_TYPE_ERROR;
    // Silence -Wunused warnings: only PANIC (disabled) consumes these.
    (void)GL_ERROR_SOURCE; (void)GL_ERROR_SEVERITY; (void)GL_ERROR_TYPE;
    (void)source; (void)type; (void)severity; (void)length; (void)userdata;
    PRINTF( "!%s [ID: %u]\n", message, id );
    // PANIC( "!%s [ID: %u]\n", message, id );
}
// Install glDebugCallback as the synchronous GL debug-output handler.
// glDebugMessageCallback is resolved at runtime through GLFW because it is
// not part of the core loader; runs at most once (do_once).
void glDebugEnable() {
    do_once {
    typedef void (*GLDEBUGPROC)(uint32_t, uint32_t, uint32_t, uint32_t, int32_t, const char *, const void *);
    typedef void (*GLDEBUGMESSAGECALLBACKPROC)(GLDEBUGPROC, const void *);
    void (*glDebugMessageCallback)(GLDEBUGPROC, const void *) = (GLDEBUGMESSAGECALLBACKPROC)glfwGetProcAddress("glDebugMessageCallback");
    // BUG FIX: glfwGetProcAddress returns NULL when the current context does
    // not export the symbol; calling through NULL crashed the process.
    if( glDebugMessageCallback ) {
        glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB);
        glDebugMessageCallback((GLDEBUGPROC)glDebugCallback, NULL);
    }
    }
}
// Per-frame GL setup: reset the viewport to the current window size, clear
// color + depth, and (re)enable blending.  The clear color is whatever was
// last set -- the explicit glClearColor call is deliberately commented out.
void glNewFrame() {
glViewport(0, 0, window_width(), window_height());
//glClearColor(0,0,0,1);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_BLEND);
}
// ----------------------------------------------------------------------------
// embedded shaders (@fixme: promote to files?)
static const char *const fs_0_0_shadowmap_lit =
// "#version 140 // inverse() requires v140\n"
"//" FILELINE "\n"
// "uniform mat4 view = mat4(1.0);\n"
"uniform vec3 lightPos = vec3(1.0);\n"
"uniform float doTexture = 1.;\n"
#if VSMCUBE
"uniform samplerCube shadowMap;\n" // VSMCUBE
#else
"uniform sampler2D shadowMap;\n" // !VSMCUBE
#endif
"struct light {\n"
" vec3 position; // world-space\n"
" vec4 diffuse;\n"
" vec4 specular;\n"
" float constantAttenuation, linearAttenuation, quadraticAttenuation;\n"
"};\n"
"light light0 = light(\n"
" lightPos,\n"
" vec4(1,1,1,1), // diffuse\n"
" vec4(1,1,1,1), // specular\n"
" 1.0, 0.0, 0.0 // attenuation (const, linear, quad)\n"
");\n"
"// From http://fabiensanglard.net/shadowmappingVSM/index.php\n"
#if VSMCUBE
"float chebyshevUpperBound(float distance, vec3 dir) {\n"
" distance = distance/20 ;\n"
" vec2 moments = texture(shadowMap, dir).rg;\n"
#else
"float chebyshevUpperBound(float distance, vec4 scPostW) {\n"
" vec2 moments = texture(shadowMap,scPostW.xy).rg;\n"
#endif
" // Surface is fully lit. as the current fragment is before the light occluder\n"
" if (distance <= moments.x)\n"
" return 1.0;\n"
" // The fragment is either in shadow or penumbra. We now use chebyshev's upperBound to check\n"
" // How likely this pixel is to be lit (p_max)\n"
" float variance = moments.y - (moments.x*moments.x);\n"
" //variance = max(variance, 0.000002);\n"
" variance = max(variance, 0.00002);\n"
" float d = distance - moments.x;\n"
" float p_max = variance / (variance + d*d);\n"
" return p_max;\n"
"}\n"
"vec4 shadowmap(in vec4 vpeye, in vec4 vneye, in vec2 uv, in vec4 sc) {\n"
#ifndef VSMCUBE
" return vec4(1.);\n"
#endif
" vec3 fragment = vec3(vpeye);\n"
" vec3 normal = vec3(normalize(vneye));\n"
" vec3 viewDir = normalize(-fragment);\n"
#if VSMCUBE
" // Diffuse lighting\n"
" // Convert to eye-space (TODO could precompute)\n"
" vec3 light_ = vec3(view * vec4(light0.position, 1.0));\n"
" // Vectors\n"
" vec3 fragmentToLight = light_ - fragment;\n"
" vec3 fragmentToLightDir = normalize(fragmentToLight);\n"
" // Shadows\n"
" vec4 fragmentToLight_world = inverse(view) * vec4(fragmentToLightDir, 0.0);\n"
" float shadowFactor = chebyshevUpperBound(length(fragmentToLight), -fragmentToLight_world.xyz);\n"
#else
" // Shadows\n"
" vec4 scPostW = sc / sc.w;\n"
" scPostW = scPostW * 0.5 + 0.5;\n"
" float shadowFactor = 1.0; // Not in shadow\n"
" bool outsideShadowMap = sc.w <= 0.0f || (scPostW.x < 0 || scPostW.y < 0) || (scPostW.x >= 1 || scPostW.y >= 1);\n"
" if (!outsideShadowMap) {\n"
" shadowFactor = chebyshevUpperBound(scPostW.z, scPostW);\n"
" }\n"
#endif
" // Lighting\n"
" // Convert to eye-space\n"
" vec3 light = vec3(view * vec4(light0.position, 1.0));\n"
" vec4 diffColor = vec4(1,1,1,1);\n"
#if VSMCUBE
" if(doTexture != 0) diffColor = vec4(vec3(texture(shadowMap, -fragmentToLight_world.xyz).r), 1.0);\n"
#else
" if(doTexture != 0) diffColor = vec4(vec3(texture(shadowMap, vec2(uv.x, 1.0 - uv.y)).r), 1.0);\n"
#endif
#if 1
" vec3 positionToLight = light - fragment;\n"
" vec3 lightDir = normalize(positionToLight);\n"
" // Angle between fragment-normal and incoming light\n"
" float cosAngIncidence = dot(lightDir, normal);\n"
" cosAngIncidence = clamp(cosAngIncidence, 0, 1);\n"
" float attenuation = 1.0f;\n"
" attenuation = 1.0 / (light0.constantAttenuation + light0.linearAttenuation * length(positionToLight) + light0.quadraticAttenuation * pow(length(positionToLight),2));\n"
" vec4 diffuse = diffColor * light0.diffuse * cosAngIncidence * attenuation;\n"
" vec4 total_lighting;\n"
" total_lighting += vec4(0.1, 0.1, 0.1, 1.0) * diffColor; // Ambient\n"
" total_lighting += diffuse * shadowFactor; // Diffuse\n"
#else
" vec4 total_lighting = diffColor;\n"
#endif
" return vec4(clamp(vec3(total_lighting), 0., 1.), 1.0);\n"
"};\n";
// Unlit variant of the shadowmap fragment helper: declares the same uniforms
// as the lit version for interface compatibility, but shadowmap() always
// returns full brightness, so geometry renders without shadow or lighting.
static const char *const fs_0_0_shadowmap_unlit = "//" FILELINE "\n"
// "uniform mat4 view = mat4(1.0);\n"
"uniform vec3 lightPos = vec3(1.0);\n"
"uniform float doTexture = 0.;\n"
"uniform sampler2D shadowMap;\n"
"vec4 shadowmap(in vec4 vpeye, in vec4 vneye, in vec2 Texcoord, in vec4 sc) {\n"
" return vec4(1.);\n"
"};\n";
// Skybox vertex shader: w=0 in the mvp multiply drops camera translation, and
// writing position.xyww pins depth to the far plane so the sky draws behind
// all scene geometry; the raw vertex position doubles as the cubemap
// sampling direction passed to the fragment stage.
static const char *const vs_3_3_skybox = "//" FILELINE "\n"
"uniform mat4 u_mvp;\n"
"in vec3 att_position;\n"
"out vec3 v_direction;\n"
"void main() {\n"
" vec4 position = u_mvp * vec4(att_position, 0.0);\n"
" gl_Position = position.xyww;\n"
" v_direction = att_position;\n"
"}\n";
// Skybox fragment shader: samples the cubemap along the interpolated view
// direction and forces alpha to 1.
static const char *const fs_3_4_skybox = "//" FILELINE "\n"
"uniform samplerCube u_cubemap;\n"
"in vec3 v_direction;\n"
"out vec4 fragcolor;\n"
"void main() {\n"
" fragcolor = vec4(texture(u_cubemap, v_direction).rgb, 1.0);\n"
"}\n";
static const char *const fs_3_4_skybox_rayleigh = "//" FILELINE "\n"
"uniform vec3 uSunPos = vec3( 0, 0.1, -1 ); // = [0, Math.cos(theta) * 0.3 + 0.2, -1];\n"
"in vec3 v_direction;\n"
"out vec4 fragcolor;\n"
"vec3 atmosphere(vec3 r, vec3 r0, vec3 pSun, float iSun, float rPlanet, float rAtmos, vec3 kRlh, float kMie, float shRlh, float shMie, float g);\n"
"void main() {\n"
" vec3 color = atmosphere(\n"
" normalize(v_direction), // normalized ray direction\n"
" vec3(0,6372e3,0), // ray origin\n"
" uSunPos, // position of the sun\n"
" 22.0, // intensity of the sun\n"
" 6371e3, // radius of the planet in meters\n"
" 6471e3, // radius of the atmosphere in meters\n"
" vec3(5.5e-6, 13.0e-6, 22.4e-6), // Rayleigh scattering coefficient\n"
" 21e-6, // Mie scattering coefficient\n"
" 8e3, // Rayleigh scale height\n"
" 1.2e3, // Mie scale height\n"
" 0.758 // Mie preferred scattering direction\n"
" );\n"
" // Apply exposure.\n"
" color = 1.0 - exp(-1.0 * color);\n"
" fragcolor = vec4(color, 1);\n"
"}\n"
"// [src] https://github.com/wwwtyro/glsl-atmosphere by wwwtyro (Unlicensed)\n"
"// For more information, please refer to <http://unlicense.org>\n"
"#define PI 3.141592\n"
"#define iSteps 16\n"
"#define jSteps 8\n"
"vec2 rsi(vec3 r0, vec3 rd, float sr) {\n"
" // ray-sphere intersection that assumes\n"
" // the sphere is centered at the origin.\n"
" // No intersection when result.x > result.y\n"
" float a = dot(rd, rd);\n"
" float b = 2.0 * dot(rd, r0);\n"
" float c = dot(r0, r0) - (sr * sr);\n"
" float d = (b*b) - 4.0*a*c;\n"
" if (d < 0.0) return vec2(1e5,-1e5);\n"
" return vec2(\n"
" (-b - sqrt(d))/(2.0*a),\n"
" (-b + sqrt(d))/(2.0*a)\n"
" );\n"
"}\n"
"vec3 atmosphere(vec3 r, vec3 r0, vec3 pSun, float iSun, float rPlanet, float rAtmos, vec3 kRlh, float kMie, float shRlh, float shMie, float g) {\n"
" // Normalize the sun and view directions.\n"
" pSun = normalize(pSun);\n"
" r = normalize(r);\n"
" // Calculate the step size of the primary ray.\n"
" vec2 p = rsi(r0, r, rAtmos);\n"
" if (p.x > p.y) return vec3(0,0,0);\n"
" p.y = min(p.y, rsi(r0, r, rPlanet).x);\n"
" float iStepSize = (p.y - p.x) / float(iSteps);\n"
" // Initialize the primary ray time.\n"
" float iTime = 0.0;\n"
" // Initialize accumulators for Rayleigh and Mie scattering.\n"
" vec3 totalRlh = vec3(0,0,0);\n"
" vec3 totalMie = vec3(0,0,0);\n"
" // Initialize optical depth accumulators for the primary ray.\n"
" float iOdRlh = 0.0;\n"
" float iOdMie = 0.0;\n"
" // Calculate the Rayleigh and Mie phases.\n"
" float mu = dot(r, pSun);\n"
" float mumu = mu * mu;\n"
" float gg = g * g;\n"
" float pRlh = 3.0 / (16.0 * PI) * (1.0 + mumu);\n"
" float pMie = 3.0 / (8.0 * PI) * ((1.0 - gg) * (mumu + 1.0)) / (pow(1.0 + gg - 2.0 * mu * g, 1.5) * (2.0 + gg));\n"
" // Sample the primary ray.\n"
" for (int i = 0; i < iSteps; i++) {\n"
" // Calculate the primary ray sample position.\n"
" vec3 iPos = r0 + r * (iTime + iStepSize * 0.5);\n"
" // Calculate the height of the sample.\n"
" float iHeight = length(iPos) - rPlanet;\n"
" // Calculate the optical depth of the Rayleigh and Mie scattering for this step.\n"
" float odStepRlh = exp(-iHeight / shRlh) * iStepSize;\n"
" float odStepMie = exp(-iHeight / shMie) * iStepSize;\n"
" // Accumulate optical depth.\n"
" iOdRlh += odStepRlh;\n"
" iOdMie += odStepMie;\n"
" // Calculate the step size of the secondary ray.\n"
" float jStepSize = rsi(iPos, pSun, rAtmos).y / float(jSteps);\n"
" // Initialize the secondary ray time.\n"
" float jTime = 0.0;\n"
" // Initialize optical depth accumulators for the secondary ray.\n"
" float jOdRlh = 0.0;\n"
" float jOdMie = 0.0;\n"
" // Sample the secondary ray.\n"
" for (int j = 0; j < jSteps; j++) {\n"
" // Calculate the secondary ray sample position.\n"
" vec3 jPos = iPos + pSun * (jTime + jStepSize * 0.5);\n"
" // Calculate the height of the sample.\n"
" float jHeight = length(jPos) - rPlanet;\n"
" // Accumulate the optical depth.\n"
" jOdRlh += exp(-jHeight / shRlh) * jStepSize;\n"
" jOdMie += exp(-jHeight / shMie) * jStepSize;\n"
" // Increment the secondary ray time.\n"
" jTime += jStepSize;\n"
" }\n"
" // Calculate attenuation.\n"
" vec3 attn = exp(-(kMie * (iOdMie + jOdMie) + kRlh * (iOdRlh + jOdRlh)));\n"
" // Accumulate scattering.\n"
" totalRlh += odStepRlh * attn;\n"
" totalMie += odStepMie * attn;\n"
" // Increment the primary ray time.\n"
" iTime += iStepSize;\n"
" }\n"
" // Calculate and return the final color.\n"
" return iSun * (pRlh * kRlh * totalRlh + pMie * kMie * totalMie);\n"
"}\n";
static const char *const vs_332_32 = "//" FILELINE "\n"
//"uniform mat4 u_model, u_view, u_proj;\n"
"uniform mat4 u_mvp;\n"
"in vec3 att_position;\n"
"in vec3 att_normal;\n"
"in vec2 att_texcoord;\n"
"out vec3 v_normal;\n"
"out vec3 v_normal_ws;\n"
"out vec2 v_texcoord;\n"
// shadow
"uniform mat4 model, view, proj;\n"
"uniform mat4 cameraToShadowProjector;\n" // !VSMCUBE
"out vec4 vneye;\n"
"out vec4 vpeye;\n"
"out vec4 sc;\n" // !VSMCUBE
"void do_shadow() {\n"
" vneye = view * model * vec4(att_normal, 0.0f);\n"
" vpeye = view * model * vec4(att_position, 1.0);\n"
" sc = cameraToShadowProjector * model * vec4(att_position, 1.0f);\n"
"}\n"
"void main() {\n"
//" gl_Position = proj * view * model * vec4(att_position, 1.0);\n"
" gl_Position = u_mvp * vec4(att_position, 1.0);\n"
" v_normal = normalize(att_normal);\n"
" v_normal_ws = normalize(vec3(model * vec4(att_normal, 0.)));\n" // normal world/model space
" v_texcoord = att_texcoord;\n"
" do_shadow();\n"
"}";
// Fullscreen-triangle vertex shader (variant A): derives clip position and
// UV purely from gl_VertexID -- draw 3 vertices with no vertex buffer bound.
static const char *const vs_0_2_fullscreen_quad_A = "//" FILELINE "\n"
"out vec2 texcoord;\n"
"void main() {\n"
" texcoord = vec2( (gl_VertexID << 1) & 2, gl_VertexID & 2 );\n"
// BUG FIX: position used `texCoord` (capital C) while the declared variable
// is `texcoord` -- GLSL is case-sensitive, so the shader failed to compile.
" gl_Position = vec4( texcoord * 2.0 - 1.0, 0.0, 1.0 );\n"
"}\n";
static const char *const vs_0_2_fullscreen_quad_B = "//" FILELINE "\n"
"out vec2 uv;\n"
"void main() {\n"
" float x = float(((uint(gl_VertexID) + 2u) / 3u)%2u); \n"
" float y = float(((uint(gl_VertexID) + 1u) / 3u)%2u); \n"
" gl_Position = vec4(-1.0 + x*2.0, 0.0+(-1.0+y*2.0), 0.0, 1.0);\n" // normal(0+),flipped(0-)
" uv = vec2(x, y);\n" // normal(y),flipped(1.0-y)
"}\n";
static const char *const vs_0_2_fullscreen_quad_B_flipped = "//" FILELINE "\n"
"out vec2 uv;\n"
"void main() {\n"
" float x = float(((uint(gl_VertexID) + 2u) / 3u)%2u); \n"
" float y = float(((uint(gl_VertexID) + 1u) / 3u)%2u); \n"
" gl_Position = vec4(-1.0 + x*2.0, 0.0-(-1.0+y*2.0), 0.0, 1.0);\n" // normal(0+),flipped(0-)
" uv = vec2(x, y);\n" // normal(y),flipped(1.0-y)
"}\n";
/*
"out vec2 uv;\n"
"void main() {\n"
" float x = gl_VertexID / 2;\n"
" float y = gl_VertexID % 2;\n"
" uv = vec2(x, y);\n"
" gl_Position = vec4(2.0*uv - 1.0, 0.0, 1.0);\n"
"}\n";
*/
static const char *const fs_2_4_texel_inv_gamma = "//" FILELINE "\n"
"uniform sampler2D texture0; /*unit0*/\n"
"uniform float u_inv_gamma;\n"
"in vec2 uv;\n"
"out vec4 fragcolor;\n"
"void main() {\n"
" vec4 texel = texture( texture0, uv );\n"
" fragcolor = texel;\n"
" fragcolor.rgb = pow( fragcolor.rgb, vec3( u_inv_gamma ) );\n" // defaults: 1.0/2.2 gamma
"}\n";
static const char *const vs_32344443_332_model = "//" FILELINE "\n"
"#ifndef MAX_BONES\n"
"#define MAX_BONES 110\n"
"#endif\n"
"uniform mat3x4 vsBoneMatrix[MAX_BONES];\n"
"uniform bool SKINNED = false;\n"
// "uniform mat4 M;\n" // RIM
"uniform mat4 MVP;\n"
"in vec3 att_position;\n"
"in vec2 att_texcoord;\n"
"in vec3 att_normal;\n"
"in vec4 att_tangent;\n"
"in vec4 att_indexes;\n"
"in vec4 att_weights;\n"
"in vec4 att_color;\n"
"in vec3 att_bitangent;\n"
"out vec3 v_position;\n"
"out vec3 v_normal, v_normal_ws;\n"
"out vec2 v_texcoord;\n"
// shadow
"uniform mat4 model, view;\n"
"uniform mat4 cameraToShadowProjector;\n"
"out vec4 vneye;\n"
"out vec4 vpeye;\n"
"out vec4 sc;\n"
"void do_shadow() {\n"
" vneye = view * model * vec4(att_normal, 0.0f);\n"
" vpeye = view * model * vec4(att_position, 1.0);\n"
" sc = cameraToShadowProjector * model * vec4(att_position, 1.0f);\n"
"}\n"
"void main() {\n"
" vec3 objPos;\n"
" if(!SKINNED) {\n"
" objPos = att_position;\n"
" v_normal = att_normal;\n"
" } else {\n"
" mat3x4 m = vsBoneMatrix[int(att_indexes.x)] * att_weights.x;\n"
" m += vsBoneMatrix[int(att_indexes.y)] * att_weights.y;\n"
" m += vsBoneMatrix[int(att_indexes.z)] * att_weights.z;\n"
" m += vsBoneMatrix[int(att_indexes.w)] * att_weights.w;\n"
" objPos = vec4(att_position, 1.0) * m;\n"
" v_normal = vec4(att_normal, 0.0) * m;\n"
" //@todo: tangents\n"
" }\n"
" v_normal_ws = normalize(vec3(model * vec4(v_normal, 0.)));\n" // normal to world/model space
" v_normal = normalize(v_normal);\n"
" v_position = att_position;\n"
" v_texcoord = att_texcoord;\n"
" gl_Position = MVP * vec4( objPos, 1.0 );\n"
" do_shadow();\n"
"}\n";
#if 0
static const char *const fs_32_4_model_basic = "//" FILELINE "\n"
"uniform sampler2D fsDiffTex;\n"
"uniform sampler2D fsNormalTex;\n"
"uniform sampler2D fsPositionTex;\n"
"uniform mat4 MVP;\n"
"in vec3 v_normal;\n"
"in vec2 v_texcoord;\n"
"out vec4 fragColor;\n"
"void main() {\n"
" vec4 diff = texture(fsDiffTex, v_texcoord).rgba;\n"
" vec3 n = normalize(mat3(MVP) * v_normal); // transform normal to eye space\n"
" fragColor = diff;// * vec4(v_normal.xyz, 1);\n"
"}\n";
#endif
// model fragment shader: diffuse texture and/or 2nd-order spherical-harmonics
// irradiance (9 coefficients), modulated by a shadowmap term.
// Building with RIM defined adds a blue rimlight pass.
static const char *const fs_32_4_model = "//" FILELINE "\n"
"uniform mat4 model, view;\n"
"uniform sampler2D u_texture2d;\n"
"uniform vec3 u_coefficients_sh[9];\n"
"uniform bool u_textured = true;\n"
"uniform bool u_lit = false;\n"
"#ifdef RIM\n"
"in vec3 v_position;\n"
"#endif\n"
"in vec3 v_normal, v_normal_ws;\n"
"in vec2 v_texcoord;\n"
"out vec4 fragcolor;\n"
"{{include-shadowmap}}\n" // placeholder expanded by the engine before compiling
"in vec4 vpeye;\n"
"in vec4 vneye;\n"
"in vec4 sc;\n"
"vec4 get_shadow() {\n"
" return shadowmap(vpeye, vneye, v_texcoord, sc);\n"
"}\n"
"void main() {\n"
" vec3 n = /*normalize*/(v_normal);\n"
// evaluate the 9 SH basis terms against the incoming normal
" vec3 SHLightResult[9];\n"
" SHLightResult[0] = 0.282095f * u_coefficients_sh[0];\n"
" SHLightResult[1] = -0.488603f * u_coefficients_sh[1] * n.y;\n"
" SHLightResult[2] = 0.488603f * u_coefficients_sh[2] * n.z;\n"
" SHLightResult[3] = -0.488603f * u_coefficients_sh[3] * n.x;\n"
" SHLightResult[4] = 1.092548f * u_coefficients_sh[4] * n.x * n.y;\n"
" SHLightResult[5] = -1.092548f * u_coefficients_sh[5] * n.y * n.z;\n"
" SHLightResult[6] = 0.315392f * u_coefficients_sh[6] * (3.0f * n.z * n.z - 1.0f);\n"
" SHLightResult[7] = -1.092548f * u_coefficients_sh[7] * n.x * n.z;\n"
" SHLightResult[8] = 0.546274f * u_coefficients_sh[8] * (n.x * n.x - n.y * n.y);\n"
" vec3 result = vec3(0.0);\n"
" for (int i = 0; i < 9; ++i)\n"
"    result += SHLightResult[i];\n"
// lighting
" if(u_textured && u_lit) fragcolor = texture(u_texture2d, v_texcoord) * vec4(result, 1.0);\n" // diffuse + lit
" else if(u_textured) fragcolor = texture(u_texture2d, v_texcoord);\n" // diffuse only
" else fragcolor = vec4(result, 1.0);\n" // lit only
// shadowing
"fragcolor *= get_shadow();\n"
// matcap
// "vec2 muv = vec2(view * vec4(v_normal_ws, 0))*0.5+vec2(0.5,0.5);\n" // normal (model space) to view space
// "fragcolor = texture(u_texture2d, vec2(muv.x, 1.0-muv.y));\n"
// rimlight
// NOTE(review): the RIM block below references a matrix 'M' that is never
// declared in this shader (uniforms are 'model'/'view') — confirm before
// enabling RIM, or it will fail to compile.
"#ifdef RIM\n"
"   {vec3 n = normalize(mat3(M) * v_normal);  // convert normal to view space\n"
"   vec3 p = (M * vec4(v_position,1.0)).xyz;  // convert position to view space\n"
"   vec3 v = normalize(-p);                   // eye vector\n"
"   float rim = 1.0 - max(dot(v, n), 0.0);    // rimlight\n"
"   rim = smoothstep(1.0-0.01, 1.0, rim);     // intensity (0.01)\n"
"   fragcolor += vec4(0.0, 0.0, rim, 1.0);}   // blue\n"
"#endif\n"
"}\n";
// fullscreen fragment shader: three planar Y/Cb/Cr textures -> RGB, with
// gamma correction and an (disabled) saturation pass. Used for video playback.
static const char *const fs_2_4_texel_ycbr_gamma_saturation = "//" FILELINE "\n"
"uniform sampler2D u_texture_y;  /*unit0*/\n"
"uniform sampler2D u_texture_cb; /*unit1*/\n"
"uniform sampler2D u_texture_cr; /*unit2*/\n"
"uniform float u_gamma;\n"
"in vec2 uv;\n"
"out vec4 fragcolor;\n"
"void main() {\n"
"    float y = texture(u_texture_y, uv).r;\n"
"    float cb = texture(u_texture_cb, uv).r;\n"
"    float cr = texture(u_texture_cr, uv).r;\n"
// JFIF-style YCbCr->RGB as a single affine transform (last row de-biases Cb/Cr)
"    const mat4 to_rgb = mat4(\n"
"        1.0000,  1.0000,  1.0000,  0.0000,\n"
"        0.0000, -0.3441,  1.7720,  0.0000,\n"
"        1.4020, -0.7141,  0.0000,  0.0000,\n"
"       -0.7010,  0.5291, -0.8860,  1.0000\n"
"    );\n"
"    vec4 texel = to_rgb * vec4(y, cb, cr, 1.0);\n"
/* same as:
"    vec3 yCbCr = vec3(y,cb-0.5,cr-0.5);\n"
"    vec4 texel = vec4(  dot( vec3( 1.0,  0.0,      1.402 ),    yCbCr ),\n"
"                        dot( vec3( 1.0 , -0.34414 , -0.71414 ), yCbCr ),\n"
"                        dot( vec3( 1.0,  1.772,    0.0 ),       yCbCr ), 1.0);\n"
*/
"    // gamma correction\n"
"    texel.rgb = pow(texel.rgb, vec3(1.0 / u_gamma));\n"
"   // saturation (algorithm from Chapter 16 of OpenGL Shading Language)\n"
"   if(false) { float saturation = 2.0; const vec3 W = vec3(0.2125, 0.7154, 0.0721);\n"
"   vec3 intensity = vec3(dot(texel.rgb, W));\n"
"   texel.rgb = mix(intensity, texel.rgb, saturation); }\n"
"    fragcolor = vec4(texel.rgb, 1.0);\n"
"}\n";
// sprite vertex shader: passes through per-vertex color and texcoord,
// transforms position by a single mvp (an ortho 2D matrix in practice).
static const char *const vs_324_24_sprite = "//" FILELINE "\n"
"uniform mat4 u_mvp;\n"
"in vec3 att_Position;\n"
"in vec2 att_TexCoord;\n"
"in vec4 att_Color;\n"
"out vec2 vTexCoord;\n"
"out vec4 vColor;\n"
"void main() {\n"
"    vColor = att_Color;\n"
"    vTexCoord = att_TexCoord;\n"
"    gl_Position = u_mvp * vec4(att_Position, 1.0);\n"
"}\n";
// sprite fragment shader: modulate texture by vertex color; alpha-test at 0.5
// so fully transparent texels do not write depth.
static const char *const fs_24_4_sprite = "//" FILELINE "\n"
"uniform sampler2D u_texture;\n"
"in vec2 vTexCoord;\n"
"in vec4 vColor;\n"
"out vec4 fragColor;\n"
"void main() {\n"
"    vec4 texColor = texture(u_texture, vTexCoord);\n"
"texColor = vColor * texColor;\n"
"if(texColor.a < 0.5) discard;\n" // fix: trailing \n was missing, which fused this and the next statement onto one GLSL source line and skewed shader_print()/driver-log line numbers
"    fragColor = texColor;\n"
"}\n";
// shadertoy compatibility preamble: maps shadertoy's implicit uniforms
// (iResolution, iMouse, iChannelN, iTime, ...) onto plain engine uniforms,
// so unmodified shadertoy fragment shaders can be appended after it.
static const char *const fs_2_4_preamble = "//" FILELINE "\n"
"#define texture2D texture\n"
"#define texture2DLod textureLod\n"
"#define FRAGCOLOR fragColor\n"
"#define texcoord uv\n"
"#define TEXCOORD uv\n"
"uniform sampler2D iChannel0;\n"
"uniform sampler2D iChannel1;\n"
"uniform float iWidth, iHeight, iTime, iFrame, iMousex, iMousey;\n"
"uniform float iChannelRes0x, iChannelRes0y;\n"
"uniform float iChannelRes1x, iChannelRes1y;\n"
"vec2 iResolution = vec2(iWidth, iHeight);\n"
"vec2 iMouse = vec2(iMousex, iMousey);\n"
"vec2 iChannelResolution[2] = vec2[2]( vec2(iChannelRes0x, iChannelRes0y),vec2(iChannelRes1x, iChannelRes1y) );\n"
"float iGlobalTime = iTime;\n"
"in vec2 texcoord;\n"
"out vec4 fragColor;\n";
// shadertoy driver: calls the user-provided mainImage() entry point with
// pixel coordinates derived from the fullscreen texcoord.
static const char *const fs_main_shadertoy = "//" FILELINE "\n"
"void mainImage( out vec4 fragColor, in vec2 fragCoord );\n"
"void main() {\n"
"   mainImage(fragColor, texcoord.xy * iResolution);\n"
"}\n";
// ----------------------------------------------------------------------------
// shaders
// Dump a shader source to stdout with 1-based, zero-padded line numbers, so
// driver error logs (which cite line numbers) can be cross-referenced.
void shader_print(const char *source) {
    int line = 0, i = 0;
    while( source[i] > 0 ) {
        printf("\t%03d: ", line + 1);
        // emit the printable body of the current line (tabs allowed through)
        for( ; source[i] >= 32 || source[i] == '\t'; ++i ) fputc(source[i], stdout);
        // consume the run of control characters, counting newlines as we go
        for( ; source[i] > 0 && source[i] < 32; ++i ) line += source[i] == '\n';
        puts("");
    }
}
static
// Compile one GL shader stage of the given type from `source`.
// On failure: dumps the numbered source, then PANICs with the driver info log.
// Returns the shader object id, or 0 on failure (if PANIC ever returns).
GLuint shader_compile( GLenum type, const char *source ) {
GLuint shader = glCreateShader(type);
glShaderSource(shader, 1, (const char **)&source, NULL);
glCompileShader(shader);
GLint status = GL_FALSE, length;
glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
if( status == GL_FALSE ) {
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &length);
ASSERT(length < 2048); // info log must fit the fixed buffer below
char buf[2048] = { 0 };
glGetShaderInfoLog(shader, length, NULL, buf);
// dump log with line numbers
shader_print( source );
PANIC("ERROR: shader_compile(): %s\n%s\n", type == GL_VERTEX_SHADER ? "Vertex" : "Fragment", buf);
return 0;
}
return shader;
}
// Build a GL program from vertex + fragment sources.
// - attribs: comma-separated attribute names, bound to locations 0..N in order.
// - fragcolor: name of the fragment-shader output bound to draw buffer 0.
// Sources that do not already start with "#version" get "#version 140" prepended.
// Returns the program id, or 0 on link failure (after PANIC).
unsigned shader(const char *vs, const char *fs, const char *attribs, const char *fragcolor) {
    PRINTF("Compiling shader\n");
    // prepend a #version header only when the source lacks one
    fs = fs[0] == '#' && fs[1] == 'v' ? fs : stringf("#version 140\n%s", fs);
    vs = vs[0] == '#' && vs[1] == 'v' ? vs : stringf("#version 140\n%s", vs);

    GLuint vert = shader_compile(GL_VERTEX_SHADER, vs);
    GLuint frag = shader_compile(GL_FRAGMENT_SHADER, fs);
    //GLuint geom = shader_compile(GL_GEOMETRY_SHADER, gs);

    GLuint program = 0;
    if( vert && frag ) {
        program = glCreateProgram();
        glAttachShader(program, vert);
        glAttachShader(program, frag);
        // glAttachShader(program, geom);

        // bind each comma-separated attribute name to location 0..N, in order
        for( int i = 0; attribs && attribs[0]; ++i ) {
            char attrib[128] = {0};
            sscanf(attribs, "%127[^,]", attrib);
            while( attribs[0] && attribs[0] != ',' ) { attribs++; }
            while( attribs[0] && attribs[0] == ',' ) { attribs++; break; }
            if(!attrib[0]) continue;
            glBindAttribLocation(program, i, attrib);
            PRINTF("Shader.attribute[%d]=%s\n", i, attrib);
        }

        glBindFragDataLocation(program, 0, fragcolor);
        glLinkProgram(program);

        GLint status = GL_FALSE, length;
        glGetProgramiv(program, GL_LINK_STATUS, &status);
        // fix: `buf` is read by the PANIC branch below, so it must be declared
        // here. Previously it was scoped inside the log-dumping if() block and
        // was out of scope (undeclared) at the PANIC call.
        char buf[2048] = { 0 };
#ifdef DEBUG_SHADER
        if (status != GL_FALSE && program == DEBUG_SHADER) {
#else
        if (status == GL_FALSE) {
#endif
            glGetProgramiv(program, GL_INFO_LOG_LENGTH, &length);
            ASSERT(length < 2048);
            glGetProgramInfoLog(program, length, NULL, buf);
            puts("--- vs:");
            shader_print(vs);
            puts("--- fs:");
            shader_print(fs);
        }
        if (status == GL_FALSE) {
            PANIC("ERROR: shader(): Shader/program link: %s\n", buf);
            return 0;
        }
        // glDetachShader(program, vert);
        // glDetachShader(program, frag);
        // glDetachShader(program, geom);

        // flag stage objects for deletion; they are freed when the program dies
        glDeleteShader(vert);
        glDeleteShader(frag);
        // glDeleteShader(geom);

        //#ifdef DEBUG_ANY_SHADER
        //    PRINTF("Shader #%d:\n", program);
        //    shader_print(vs);
        //    shader_print(fs);
        //#endif
    }
    return program;
}
// Delete a GL program created by shader(). Safe to call with 0 (GL ignores it).
void shader_destroy(unsigned program){
glDeleteProgram(program);
}
unsigned last_shader = -1; // program most recently passed to shader_bind(); ~0u when none yet
static
// Resolve a uniform location in the currently bound program.
// Returns -1 (after logging) when the name is missing or was optimized out.
int shader_uniform(const char *name) {
int ret = glGetUniformLocation(last_shader, name);
if( ret < 0 ) PRINTF("!cannot find uniform '%s' in shader program %d\n", name, (int)last_shader );
return ret;
}
unsigned shader_get_active() { return last_shader; } // id of the program last bound via shader_bind()
unsigned shader_bind(unsigned program) { unsigned ret = last_shader; return glUseProgram(last_shader = program), ret; } // bind program; returns the previously bound id so callers can restore it
// uniform setters: all operate on the program most recently shader_bind()ed
void shader_int(const char *uniform, int i) { glUniform1i(shader_uniform(uniform), i); }
void shader_float(const char *uniform, float f) { glUniform1f(shader_uniform(uniform), f); }
void shader_vec2(const char *uniform, vec2 v) { glUniform2fv(shader_uniform(uniform), 1, &v.x); }
void shader_vec3(const char *uniform, vec3 v) { glUniform3fv(shader_uniform(uniform), 1, &v.x); }
void shader_vec4(const char *uniform, vec4 v) { glUniform4fv(shader_uniform(uniform), 1, &v.x); }
void shader_mat44(const char *uniform, mat44 m) { glUniformMatrix4fv(shader_uniform(uniform), 1, GL_FALSE/*GL_TRUE*/, m); } // matrices uploaded column-major (no transpose)
// Bind `texture` to texture unit `unit` and point `sampler` at that unit.
// Fix: glActiveTexture must precede glBindTexture — the old order bound the
// texture to whatever unit happened to be active, not to `unit`.
void shader_texture(const char *sampler, unsigned texture, unsigned unit) { glActiveTexture(GL_TEXTURE0 + unit); glBindTexture(GL_TEXTURE_2D, texture); glUniform1i(shader_uniform(sampler), unit); }
// Point `sampler` at unit 0 and bind the cubemap to the current unit.
// NOTE(review): assumes GL_TEXTURE0 is the active unit at call time (the bind
// goes to whichever unit is active) — confirm callers guarantee this.
void shader_cubemap(const char *sampler, unsigned texture) { glUniform1i(shader_uniform(sampler), 0); glBindTexture(GL_TEXTURE_CUBE_MAP, texture); }
// -----------------------------------------------------------------------------
// colors
// Pack four 8-bit channels into a 0xRRGGBBAA dword.
// Fix: widen to uint32_t before shifting — uint8_t promotes to (signed) int,
// so `r << 24` with r >= 128 shifted into the sign bit (undefined behavior).
uint32_t rgba( uint8_t r, uint8_t g, uint8_t b, uint8_t a ) {
    return (uint32_t)r << 24 | (uint32_t)g << 16 | (uint32_t)b << 8 | a;
}
// Pack four 8-bit channels in B,G,R,A order — the channel-swapped twin of
// rgba(); the packing is spelled out inline here rather than delegated.
uint32_t bgra( uint8_t r, uint8_t g, uint8_t b, uint8_t a ) {
    return (uint32_t)b << 24 | (uint32_t)g << 16 | (uint32_t)r << 8 | a;
}
// Extract the alpha channel (the low byte of a packed color), normalized to [0,1].
float alpha( uint32_t rgba ) {
    uint32_t a8 = rgba & 255u;
    return a8 / 255.f;
}
// -----------------------------------------------------------------------------
// images
// Allocate an uninitialized x*y image. The channel count comes from the
// format flag (IMAGE_R/RG/RGB/RGBA); RGB when no format flag is given.
// Caller owns img.pixels (release with image_destroy()).
image_t image_create(int x, int y, int flags) {
    // later checks win, so IMAGE_RGBA takes precedence over IMAGE_R, etc.
    int channels = 3;
    if(flags & IMAGE_R)    channels = 1;
    if(flags & IMAGE_RG)   channels = 2;
    if(flags & IMAGE_RGB)  channels = 3;
    if(flags & IMAGE_RGBA) channels = 4;
    image_t img;
    img.x = x;
    img.y = y;
    img.n = channels;
    img.pixels = REALLOC(0, x * y * channels ); // @fixme: image_destroy() requires stbi allocator to match REALLOC
    return img;
}
// Decode an encoded image (png/jpg/...) from a memory buffer via stb_image.
// flags request a channel count (IMAGE_R/RG/RGB/RGBA; 0 keeps the file's
// native count) and optional vertical flip. Returns a zeroed image on failure.
// Caller owns img.pixels (release with image_destroy()).
image_t image_from_mem(const char *data, int size, int flags) {
image_t img = {0};
if( data && size ) {
stbi_set_flip_vertically_on_load(flags & IMAGE_FLIP ? 1 : 0);
int n = 0; // 0 = let stbi keep the source channel count
if(flags & IMAGE_R) n = 1;
if(flags & IMAGE_RG) n = 2;
if(flags & IMAGE_RGB) n = 3;
if(flags & IMAGE_RGBA) n = 4;
img.pixels = stbi_load_from_memory(data, size, &img.x,&img.y,&img.n, n);
if( img.pixels ) {
// NOTE(review): reads img.w/img.h while the rest of this function uses
// img.x/img.y — presumably union aliases in image_t; confirm.
PRINTF("Loaded image (%dx%d %.*s->%.*s)\n",img.w,img.h,img.n,"RGBA",n?n:img.n,"RGBA");
} else {
// PANIC("Error loading image (%s)\n", pathfile);
}
img.n = n ? n : img.n; // report the channel count actually delivered
}
return img;
}
// Load and decode an image from the virtual filesystem.
// Returns a zeroed image when the file is missing or fails to decode.
image_t image(const char *pathfile, int flags) {
const char *fname = vfs_find(pathfile);
// if( !fname[0] ) fname = vfs_find(stringf("%s.png",pathfile)); // needed?
// if( !fname[0] ) fname = vfs_find(stringf("%s.jpg",pathfile)); // needed?
// if( !fname[0] ) fname = vfs_find(stringf("%s.tga",pathfile)); // needed?
// if( !fname[0] ) fname = vfs_find(stringf("%s.jpg.png",pathfile)); // needed?
// if( !fname[0] ) fname = vfs_find(stringf("%s.tga.png",pathfile)); // needed?
// if( !fname[0] ) fname = vfs_find(stringf("%s.png.jpg",pathfile)); // needed?
// if( !fname[0] ) fname = vfs_find(stringf("%s.tga.jpg",pathfile)); // needed?
int size = 0;
char *data = vfs_load(fname, &size); // data owned by the vfs; not freed here
return image_from_mem(data, size, flags);
}
// Free the pixel buffer of an image loaded via stb_image (or image_create,
// provided REALLOC matches the stbi allocator — see @fixme there).
void image_destroy(image_t *img) {
if(img->pixels) stbi_image_free(img->pixels);
img->pixels = 0; // *img = (image_t){0}; // do not clear fields yet. might be useful in the future.
}
// bilinear interpolation (uv must be in image coords, range [0..w-1,0..h-1])
static
// Sample the first 3 channels of `in` at fractional coords, blending the four
// surrounding texels. No clamping of uv below 0; callers must stay in range.
// NOTE(review): mixes in.x/in.y with in.w (indexing) — presumably union
// aliases in image_t; confirm.
vec3 bilinear(image_t in, vec2 uv) { // image_bilinear_pixel() ?
float w = in.x, h = in.y, u = uv.x, v = uv.y;
float u1 = (int)u, v1 = (int)v, u2 = minf(u1+1, w-1), v2 = minf(v1+1, h-1);
float c1 = u - u1, c2 = v - v1; // fractional weights along x and y
uint8_t *p1 = &in.pixels8[ in.n * (int)(u1 + v1 * in.w) ];
uint8_t *p2 = &in.pixels8[ in.n * (int)(u2 + v1 * in.w) ];
uint8_t *p3 = &in.pixels8[ in.n * (int)(u1 + v2 * in.w) ];
uint8_t *p4 = &in.pixels8[ in.n * (int)(u2 + v2 * in.w) ];
vec3 A = vec3( p1[0], p1[1], p1[2] );
vec3 B = vec3( p2[0], p2[1], p2[2] );
vec3 C = vec3( p3[0], p3[1], p3[2] );
vec3 D = vec3( p4[0], p4[1], p4[2] );
return mix3(mix3(A, B, c1), mix3(C, D, c1), c2);
}
// -----------------------------------------------------------------------------
// textures
// (Re)upload pixel data into an existing texture object and configure its
// sampling state. n = channel count (1..4). flags combine TEXTURE_* options:
// storage (FLOAT/SRGB/BGR/BC1-3/DEPTH), wrap (REPEAT/BORDER), filtering
// (LINEAR/MIPMAPS). Returns t->id; deliberately leaves the texture bound.
unsigned texture_update(texture_t *t, unsigned w, unsigned h, unsigned n, void *pixels, int flags) {
ASSERT( t && t->id );
ASSERT( n <= 4 );
// indexed by n for byte formats, n+5 for float formats
GLuint pixel_types[] = { GL_RED, GL_RED, GL_RG, GL_RGB, GL_RGBA, GL_R32F, GL_R32F, GL_RG32F, GL_RGB32F, GL_RGBA32F };
GLenum pixel_storage = flags & TEXTURE_FLOAT ? GL_FLOAT : GL_UNSIGNED_BYTE;
GLuint pixel_type = pixel_types[ n ];                                  // client-side layout
GLuint texel_type = pixel_types[ n + 5 * !!(flags & TEXTURE_FLOAT) ];  // GPU internal format
GLenum wrap = GL_CLAMP_TO_EDGE;
GLenum min_filter = GL_NEAREST, mag_filter = GL_NEAREST;
//    GLfloat color = (flags&7)/7.f, border_color[4] = { color, color, color, 1.f };
// format/flag fixups: source swizzle, sRGB, block compression, depth
if( flags & TEXTURE_BGR )  if( pixel_type == GL_RGB )   pixel_type = GL_BGR;
if( flags & TEXTURE_BGR )  if( pixel_type == GL_RGBA )  pixel_type = GL_BGRA;
if( flags & TEXTURE_SRGB ) if( texel_type == GL_RGB )   texel_type = GL_SRGB;
if( flags & TEXTURE_SRGB ) if( texel_type == GL_RGBA )  texel_type = GL_SRGB_ALPHA;
if( flags & TEXTURE_BC1 )  texel_type = GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
if( flags & TEXTURE_BC2 )  texel_type = GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
if( flags & TEXTURE_BC3 )  texel_type = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
if( flags & TEXTURE_DEPTH ) texel_type = pixel_type = GL_DEPTH_COMPONENT; // GL_DEPTH_COMPONENT32
if( flags & TEXTURE_REPEAT ) wrap = GL_REPEAT;
if( flags & TEXTURE_BORDER ) wrap = GL_CLAMP_TO_BORDER;
if( flags & TEXTURE_LINEAR ) min_filter = GL_LINEAR, mag_filter = GL_LINEAR;
if( flags & TEXTURE_MIPMAPS  ) min_filter = flags & TEXTURE_LINEAR ? GL_LINEAR_MIPMAP_LINEAR : GL_NEAREST_MIPMAP_LINEAR;
if( flags & TEXTURE_MIPMAPS  ) mag_filter = flags & TEXTURE_LINEAR ? GL_LINEAR : GL_NEAREST;
if( 0 ) { // flags & TEXTURE_PREMULTIPLY_ALPHA )
// disabled: premultiply color channels by alpha, in place
uint8_t *p = pixels;
if(n == 2) for( unsigned i = 0; i < 2*w*h; i += 2 ) {
p[i] = (p[i] * p[i+1] + 128) >> 8;
}
if(n == 4) for( unsigned i = 0; i < 4*w*h; i += 4 ) {
p[i+0] = (p[i+0] * p[i+3] + 128) >> 8;
p[i+1] = (p[i+1] * p[i+3] + 128) >> 8;
p[i+2] = (p[i+2] * p[i+3] + 128) >> 8;
}
}
GLenum texture_type = t->flags & TEXTURE_ARRAY ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D; // @fixme: test GL_TEXTURE_2D_ARRAY
//glPixelStorei( GL_UNPACK_ALIGNMENT, n < 4 ? 1 : 4 ); // for framebuffer reading
//glActiveTexture(GL_TEXTURE0 + (flags&7));
glBindTexture(texture_type, t->id);
glTexImage2D(texture_type, 0, texel_type, w, h, 0, pixel_type, pixel_storage, pixels);
glTexParameteri(texture_type, GL_TEXTURE_WRAP_S, wrap);
glTexParameteri(texture_type, GL_TEXTURE_WRAP_T, wrap);
glTexParameteri(texture_type, GL_TEXTURE_MIN_FILTER, min_filter);
glTexParameteri(texture_type, GL_TEXTURE_MAG_FILTER, mag_filter);
#if 0 // only for sampler2DShadow
if( flags & TEXTURE_DEPTH )   glTexParameteri(texture_type, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
if( flags & TEXTURE_DEPTH )   glTexParameteri(texture_type, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
#endif
// if( flags & TEXTURE_BORDER )  glTexParameterfv(texture_type, GL_TEXTURE_BORDER_COLOR, border_color);
if( flags & TEXTURE_MIPMAPS ) glGenerateMipmap(texture_type);
if( flags & TEXTURE_MIPMAPS ) {
GLfloat max_aniso = 0;
//        glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY, &max_aniso);
max_aniso = 4; // anisotropic filtering currently hardcoded off (calls commented)
//        glTexParameterf(texture_type, GL_TEXTURE_MAX_ANISOTROPY, max_aniso);
}
// glBindTexture(texture_type, 0); // do not unbind. current code expects texture to be bound at function exit
// record the new dimensions/format on the texture handle
t->w = w;
t->h = h;
t->n = n;
t->flags = flags;
return t->id;
}
// Create a GL texture object and upload pixels/state via texture_update().
// See texture_update() for the meaning of n and flags.
texture_t texture_create(unsigned w, unsigned h, unsigned n, void *pixels, int flags) {
texture_t texture = {0};
glGenTextures( 1, &texture.id );
texture_update( &texture, w, h, n, pixels, flags );
return texture;
}
// Lazily build (once) and return a shared 256x256 grayscale checker texture,
// used as the fallback when an image fails to load.
texture_t texture_checker() {
static texture_t texture = {0};
if( !texture.id ) {
#if 0
float pixels[] = { 1,0.5,0.5,1 };
texture = texture_create(2,2,1, pixels, TEXTURE_FLOAT|TEXTURE_MIPMAPS|TEXTURE_REPEAT|TEXTURE_BORDER);
#else
uint32_t *pixels = REALLOC(0, 256*256*4);
for (int y = 0, i = 0; y < 256; y++) {
for (int x = 0; x < 256; x++) {
#if 0
extern const uint32_t secret_palette[32];
uint32_t rgb = secret_palette[ y / 8 ] * !!((x ^ y) & 0x8);
pixels[i++] = (rgb>>16) & 255;
pixels[i++] = (rgb>>8) & 255;
pixels[i++] = (rgb>>0) & 255;
pixels[i++] = 255;
#elif 0
extern const uint32_t secret_palette[32];
uint32_t rgb = ((x ^ y) & 0x8) ? secret_palette[6] : secret_palette[ 8 + ((x^y) / (256/6)) ];
pixels[i++] = (rgb>>16) & 255;
pixels[i++] = (rgb>>8) & 255;
pixels[i++] = (rgb>>0) & 255;
pixels[i++] = 255;
#else
// active variant: 3-level grayscale checker, one packed dword per pixel
extern const uint32_t secret_palette[32];
uint32_t lum = (x^y) & 8 ? 128 : (x^y) & 128 ? 192 : 255;
uint32_t rgb = rgba(lum,lum,lum,255);
pixels[i++] = rgb;
#endif
}
}
texture = texture_create(256,256,4, pixels, TEXTURE_RGBA|TEXTURE_MIPMAPS|TEXTURE_REPEAT|TEXTURE_BORDER);
FREE(pixels); // texture_create() uploaded a copy; the staging buffer can go
#endif
}
return texture;
}
// Decode an encoded image from a memory buffer and upload it as a GL texture.
// Falls back to the shared checker texture when decoding fails.
texture_t texture_from_mem(const char *ptr, int len, int flags) {
    image_t img = image_from_mem(ptr, len, flags);
    if( !img.pixels ) return texture_checker();
    texture_t t = texture_create(img.x, img.y, img.n, img.pixels, flags);
    image_destroy(&img); // pixels were uploaded; CPU copy no longer needed
    return t;
}
// Load an image from the virtual filesystem and upload it as a GL texture.
// Falls back to the shared checker texture when loading fails.
texture_t texture(const char *pathfile, int flags) {
    // PRINTF("Loading file %s\n", pathfile);
    image_t img = image(pathfile, flags);
    if( !img.pixels ) return texture_checker();
    texture_t t = texture_create(img.x, img.y, img.n, img.pixels, flags);
    image_destroy(&img); // pixels were uploaded; CPU copy no longer needed
    return t;
}
// Delete the GL texture object and zero the handle so double-destroy is safe.
void texture_destroy( texture_t *t ) {
if(t->id) glDeleteTextures(1, &t->id);
t->id = 0;
}
// -----------------------------------------------------------------------------
// shadowmaps
// Create a square depth-only shadowmap: a depth texture attached to a
// color-less FBO. Restores the previously bound framebuffer before returning.
shadowmap_t shadowmap(int texture_width) { // = 1024
shadowmap_t s = {0};
s.texture_width = texture_width;
glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &s.saved_fb); // remember caller's FBO
glGenFramebuffers(1, &s.fbo);
glBindFramebuffer(GL_FRAMEBUFFER, s.fbo);
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &s.texture);
glBindTexture(GL_TEXTURE_2D, s.texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, texture_width, texture_width, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// border clamp so samples outside the map read as "not shadowed" edges
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, s.texture, 0);
glDrawBuffer(GL_NONE); // depth-only: no color attachments
glReadBuffer(GL_NONE);
glBindFramebuffer(GL_FRAMEBUFFER, s.saved_fb);
return s;
}
// Release the shadowmap's GL objects (if any) and reset the struct to zero
// so it can be reused safely.
void shadowmap_destroy(shadowmap_t *s) {
    if( s->texture ) glDeleteTextures(1, &s->texture);
    if( s->fbo ) glDeleteFramebuffers(1, &s->fbo);
    shadowmap_t blank = {0};
    *s = blank;
}
// Rebuild the light matrices: light view (mv), light view-projection (mvp),
// and the shadow lookup matrix (bias * proj * mv) that maps world space into
// [0,1] shadowmap texture space.
void shadowmap_set_shadowmatrix(shadowmap_t *s, vec3 aLightPos, vec3 aLightAt, vec3 aLightUp, const mat44 projection) {
copy44(s->proj, projection);
s->light_position = vec4(aLightPos.x, aLightPos.y, aLightPos.z, 1);
lookat44(s->mv, aLightPos, aLightAt, aLightUp);
// bias remaps clip-space [-1,1] into texture-space [0,1] (column-major)
mat44 bias = {
0.5, 0.0, 0.0, 0.0,
0.0, 0.5, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.5, 0.5, 0.5, 1.0 };
// s->shadowmatrix = bias;
// s->shadowmatrix *= s->proj;
// s->shadowmatrix *= s->mv;
//  multiply44x3(s->shadowmatrix, s->mv, s->proj, bias);
multiply44x3(s->shadowmatrix, bias, s->proj, s->mv);
// mvp = projection * s->mv;
// multiply44x2(s->mvp, s->mv, projection);
multiply44x2(s->mvp, projection, s->mv);
}
// Start the shadow pass: save the caller's viewport/FBO, bind the shadow FBO,
// and clear its depth. Pair with shadowmap_end().
void shadowmap_begin(shadowmap_t *s) {
glGetIntegerv(GL_VIEWPORT, &s->saved_viewport[0]);
glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &s->saved_fb);
glBindFramebuffer(GL_FRAMEBUFFER, s->fbo);
glViewport(0, 0, s->texture_width, s->texture_width);
glClearDepth(1);
glClear(GL_DEPTH_BUFFER_BIT);
}
// End the shadow pass: restore the viewport and framebuffer saved by
// shadowmap_begin().
void shadowmap_end(shadowmap_t *s) {
glViewport(s->saved_viewport[0], s->saved_viewport[1], s->saved_viewport[2], s->saved_viewport[3]);
glBindFramebuffer(GL_FRAMEBUFFER, s->saved_fb);
}
// shadowmap utils
// Perspective light projection (spot/point-style shadow) with 1:1 aspect.
void shadowmatrix_proj(mat44 shm_proj, float aLightFov, float znear, float zfar) {
perspective44(shm_proj, aLightFov, 1.0f, znear, zfar);
}
// Orthographic light projection (directional-light shadow).
void shadowmatrix_ortho(mat44 shm_proj, float left, float right, float bottom, float top, float znear, float zfar) {
ortho44(shm_proj, left, right, bottom, top, znear, zfar);
}
// -----------------------------------------------------------------------------
// fullscreen quads
// usage: bind empty vao & commit call for 6 (quad) or 3 vertices (tri).
// ie, glBindVertexArray(empty_vao); glDrawArrays(GL_TRIANGLES, 0, 3);
// Blit `texture` to the current framebuffer as a fullscreen quad, applying
// inverse-gamma. Shader, uniform location and empty VAO are built lazily once.
// gamma == 0 is treated as 1 (see the +!gamma guard).
void fullscreen_rgb_quad( texture_t texture, float gamma ) {
// NOTE(review): program/vao are int but GL hands back GLuint; &vao passed to
// glGenVertexArrays relies on int/GLuint pointer compatibility — confirm.
static int program = -1, vao = -1, u_inv_gamma = -1;
if( program < 0 ) {
const char* vs = vs_0_2_fullscreen_quad_B_flipped;
const char* fs = fs_2_4_texel_inv_gamma;
program = shader(vs, fs, "", "fragcolor" );
u_inv_gamma = glGetUniformLocation(program, "u_inv_gamma");
glGenVertexArrays( 1, &vao ); // attribute-less draw: vertices synthesized in the vs
}
GLenum texture_type = texture.flags & TEXTURE_ARRAY ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D;
//    glEnable( GL_BLEND );
glUseProgram( program );
glUniform1f( u_inv_gamma, 1.0f / (gamma + !gamma) ); // guard against gamma == 0
glBindVertexArray( vao );
glActiveTexture( GL_TEXTURE0 );
glBindTexture( texture_type, texture.id );
glDrawArrays( GL_TRIANGLES, 0, 6 );
profile_incstat("drawcalls", +1);
profile_incstat("triangles", +2);
glBindTexture( texture_type, 0 );
glBindVertexArray( 0 );
glUseProgram( 0 );
//    glDisable( GL_BLEND );
}
// Blit three planar Y/Cb/Cr textures (units 0/1/2) to the current framebuffer
// as a fullscreen quad, converting to RGB with gamma (video playback path).
// Shader, uniform locations and empty VAO are built lazily once.
void fullscreen_ycbcr_quad( texture_t textureYCbCr[3], float gamma ) {
static int program = -1, vao = -1, u_gamma = -1, uy = -1, ucb = -1, ucr = -1;
if( program < 0 ) {
const char* vs = vs_0_2_fullscreen_quad_B_flipped;
const char* fs = fs_2_4_texel_ycbr_gamma_saturation;
program = shader(vs, fs, "", "fragcolor" );
u_gamma = glGetUniformLocation(program, "u_gamma");
uy = glGetUniformLocation(program, "u_texture_y");
ucb = glGetUniformLocation(program, "u_texture_cb");
ucr = glGetUniformLocation(program, "u_texture_cr");
glGenVertexArrays( 1, &vao ); // attribute-less draw: vertices synthesized in the vs
}
//    glEnable( GL_BLEND );
glUseProgram( program );
glUniform1f( u_gamma, gamma );
glBindVertexArray( vao );
// bind each plane to its own texture unit and point the samplers at them
glUniform1i(uy, 0);
glActiveTexture( GL_TEXTURE0 );
glBindTexture( GL_TEXTURE_2D, textureYCbCr[0].id );
glUniform1i(ucb, 1);
glActiveTexture( GL_TEXTURE1 );
glBindTexture( GL_TEXTURE_2D, textureYCbCr[1].id );
glUniform1i(ucr, 2);
glActiveTexture( GL_TEXTURE2 );
glBindTexture( GL_TEXTURE_2D, textureYCbCr[2].id );
glDrawArrays( GL_TRIANGLES, 0, 6 );
profile_incstat("drawcalls", +1);
profile_incstat("triangles", +2);
glBindTexture( GL_TEXTURE_2D, 0 );
glBindVertexArray( 0 );
glUseProgram( 0 );
//    glDisable( GL_BLEND );
}
// ----------------------------------------------------------------------------
// sprites
// One queued sprite instance; produced by sprite_ex() and consumed by
// sprite_rebuild_meshes(), which bakes batches into vertex/index buffers.
typedef struct sprite_t {
int cellw, cellh;        // dimensions of any cell in spritesheet
int frame, ncx, ncy;     // frame in a (num cellx, num celly) spritesheet
float px, py, pz;        // origin x, y, depth
float ox, oy, cos, sin;  // offset x, offset y, cos/sin of rotation degree
float sx, sy;            // scale x,y
uint32_t rgba;           // vertex color
} sprite_t;
// sprite batching
typedef struct batch_t { array(sprite_t) sprites; mesh_t mesh; int dirty; } batch_t; // dirty: mesh holds data to render this frame
typedef map(int, batch_t) batch_group_t; // mapkey is anything that forces a flush. texture_id for now, might be texture_id+program_id soon
// sprite stream: CPU-side vertex/index layout matching the "p3 t2 c4b" mesh format
typedef struct sprite_vertex { vec3 pos; vec2 uv; uint32_t rgba; } sprite_vertex;
typedef struct sprite_index  { GLuint triangle[3]; } sprite_index;
#define sprite_vertex(...) M_CAST(sprite_vertex, __VA_ARGS__)
#define sprite_index(...)  M_CAST(sprite_index, __VA_ARGS__)
// sprite impl
static int sprite_count = 0;                     // sprites flushed last rebuild (stats)
static int sprite_program = -1;                  // lazily-built sprite shader
static array(sprite_index)  sprite_indices = 0;  // scratch buffers reused across batches
static array(sprite_vertex) sprite_vertices = 0;
// two blend groups: additive and regular alpha-blended sprites
static batch_group_t sprite_additive_group = {0};
static batch_group_t sprite_translucent_group = {0};
// Queue a whole-texture sprite at (px,py,pz) rotated by `rot` degrees,
// untinted and alpha-blended. Convenience wrapper over sprite_ex().
void sprite( texture_t texture, float px, float py, float pz, float rot ) {
sprite_ex( texture,
px,py,pz, rot, // position (x,y,depth), rotation angle
0,0, 1,1,      // offset (x,y), scale (x,y),
0,~0u,         // is_additive, tint color
0, 0,0         // frame num(x) in a (y,z) spritesheet
);
}
// Queue a sprite for this frame's batch. Parameters:
//   px,py,pz     position (x,y) and depth
//   rotation     degrees
//   ox,oy        pivot offset, in pre-scale units
//   sx,sy        scale
//   additive     1 = additive blend group, else regular alpha blending
//   rgba         vertex tint (0xRRGGBBAA)
//   frame + xcells,ycells   cell index within a spritesheet grid (0,0 = whole texture)
// Sprites are grouped per texture id and flushed by sprite_rebuild_meshes().
void sprite_ex( texture_t texture,
float px, float py, float pz, float rotation,
float ox, float oy, float sx, float sy,
int additive, uint32_t rgba,
float frame, float xcells, float ycells
) {
// reject out-of-range spritesheet frames
if (frame < 0) return;
if (frame > 0 && frame >= (xcells * ycells)) return;
// no need to queue if alpha or scale are zero
if( sx && sy && alpha(rgba) ) {
sprite_t s;
s.px = px;
s.py = py;
s.pz = pz;
s.frame = frame;
s.ncx = xcells ? xcells : 1; // default to a 1x1 sheet
s.ncy = ycells ? ycells : 1;
s.sx = sx;
s.sy = sy;
s.ox = ox * sx;
s.oy = oy * sy;
s.cellw = texture.x * sx / s.ncx; // on-screen cell size, pre-rotation
s.cellh = texture.y * sy / s.ncy;
s.rgba = rgba;
s.cos = 1;
s.sin = 0;
if(rotation) {
rotation = (rotation + 0) * ((float)C_PI / 180); // degrees -> radians
s.cos = cosf(rotation);
s.sin = sinf(rotation);
}
batch_group_t *batches = additive == 1 ? &sprite_additive_group : &sprite_translucent_group;
#if 0
batch_t *found = map_find(*batches, texture.id);
if( !found ) found = map_insert(*batches, texture.id, (batch_t){0});
#else
batch_t *found = map_find_or_add(*batches, texture.id, (batch_t){0});
#endif
array_push(found->sprites, s);
}
}
// Bake every queued sprite into per-texture meshes: two triangles per sprite,
// rotated/offset on the CPU, with spritesheet UVs. Batches with data are
// marked dirty for sprite_render_meshes(); queues are cleared afterwards.
static void sprite_rebuild_meshes() {
sprite_count = 0;
batch_group_t* list[] = { &sprite_additive_group, &sprite_translucent_group };
for( int l = 0; l < countof(list); ++l) {
for each_map_ptr(*list[l], int,_, batch_t,bt) {
bt->dirty = array_count(bt->sprites) ? 1 : 0;
if( !bt->dirty ) continue;
int index = 0;
array_clear(sprite_indices);
array_clear(sprite_vertices);
array_foreach_ptr(bt->sprites, sprite_t,it ) {
// quad corners around the pivot, before rotation
float x0 = it->ox - it->cellw/2, x3 = x0 + it->cellw;
float y0 = it->oy - it->cellh/2, y3 = y0;
float x1 = x0,                   x2 = x3;
float y1 = y0 + it->cellh,       y2 = y1;
// @todo: move this affine transform into glsl shader
vec3 v0 = { it->px + ( x0 * it->cos - y0 * it->sin ), it->py + ( x0 * it->sin + y0 * it->cos ), it->pz };
vec3 v1 = { it->px + ( x1 * it->cos - y1 * it->sin ), it->py + ( x1 * it->sin + y1 * it->cos ), it->pz };
vec3 v2 = { it->px + ( x2 * it->cos - y2 * it->sin ), it->py + ( x2 * it->sin + y2 * it->cos ), it->pz };
vec3 v3 = { it->px + ( x3 * it->cos - y3 * it->sin ), it->py + ( x3 * it->sin + y3 * it->cos ), it->pz };
// UV rect of the sheet cell; 1e-9 epsilon avoids sampling the next cell
float cx = (1.0f / it->ncx) - 1e-9f;
float cy = (1.0f / it->ncy) - 1e-9f;
int idx = (int)it->frame;
int px = idx % it->ncx;
int py = idx / it->ncx;
float ux = px * cx, uy = py * cy;
float vx = ux + cx, vy = uy + cy;
vec2 uv0 = vec2(ux, uy);
vec2 uv1 = vec2(ux, vy);
vec2 uv2 = vec2(vx, vy);
vec2 uv3 = vec2(vx, uy);
array_push( sprite_vertices, sprite_vertex(v0, uv0, it->rgba) ); // Vertex 0 (A)
array_push( sprite_vertices, sprite_vertex(v1, uv1, it->rgba) ); // Vertex 1 (B)
array_push( sprite_vertices, sprite_vertex(v2, uv2, it->rgba) ); // Vertex 2 (C)
array_push( sprite_vertices, sprite_vertex(v3, uv3, it->rgba) ); // Vertex 3 (D)
//      A--B                  A               A-B
// quad | /|  becomes triangle|\  and triangle \|
//      D--C                  D-C               C
GLuint A = (index+0), B = (index+1), C = (index+2), D = (index+3); index += 4;
array_push( sprite_indices, sprite_index(C, D, A) ); // Triangle 1
array_push( sprite_indices, sprite_index(C, A, B) ); // Triangle 2
}
mesh_upgrade(&bt->mesh, "p3 t2 c4b", 0,array_count(sprite_vertices),sprite_vertices, 3*array_count(sprite_indices),sprite_indices, MESH_STATIC);
// clear elements from queue
sprite_count += array_count(bt->sprites);
array_clear(bt->sprites);
}
}
}
// Draw every dirty sprite batch: additive group first, then translucent, each
// with its own blend mode, under a pixel-space orthographic projection.
// Builds the sprite shader lazily on first use.
static void sprite_render_meshes() {
if( sprite_program < 0 ) {
sprite_program = shader( vs_324_24_sprite, fs_24_4_sprite,
"att_Position,att_TexCoord,att_Color",
"fragColor"
);
}
// use the shader and bind the texture @ unit 0
shader_bind(sprite_program);
glActiveTexture(GL_TEXTURE0);
// setup rendering state
glEnable(GL_DEPTH_TEST);
glEnable(GL_BLEND);
glDepthFunc(GL_LEQUAL); // try to help with zfighting
// update camera and set mvp in the uniform
mat44 mvp2d;
float zdepth_max = window_height(); // 1;
ortho44(mvp2d, 0, window_width(), window_height(), 0, -zdepth_max, +zdepth_max);
shader_mat44("u_mvp", mvp2d);
// set (unit 0) in the uniform texture sampler, and render batch
// for all additive then translucent groups
if( map_count(sprite_additive_group) > 0 ) {
glBlendFunc( GL_SRC_ALPHA, GL_ONE );
for each_map_ptr(sprite_additive_group, int,texture_id, batch_t,bt) {
if( bt->dirty ) {
shader_texture("u_texture", *texture_id, 0);
mesh_render(&bt->mesh);
}
}
//        map_clear(sprite_additive_group);
}
if( map_count(sprite_translucent_group) > 0 ) {
glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
for each_map_ptr(sprite_translucent_group, int,texture_id, batch_t,bt) {
if( bt->dirty ) {
shader_texture("u_texture", *texture_id, 0);
mesh_render(&bt->mesh);
}
}
//        map_clear(sprite_translucent_group);
}
// restore default state
glDisable(GL_DEPTH_TEST);
glDisable(GL_BLEND);
glDepthFunc(GL_LESS);
glUseProgram(0);
}
// Initialize the two sprite batch maps (keyed by texture id).
static void sprite_init() {
map_init(sprite_translucent_group, less_int, hash_int);
map_init(sprite_additive_group, less_int, hash_int);
}
// Per-frame sprite flush: bake queued sprites into meshes, then draw them.
static void sprite_update() {
profile(Sprite rebuild) {
sprite_rebuild_meshes();
}
profile(Sprite render) {
sprite_render_meshes();
}
}
// -----------------------------------------------------------------------------
// cubemaps
// project cubemap coords into sphere normals
static
// Map integer texel coordinates (x,y) on cubemap face `face` to a direction
// vector. Faces follow the GL cubemap order: +X,-X,+Y,-Y,+Z,-Z.
vec3 cubemap2polar(int face, int x, int y, int texture_width) {
    // normalize texel coords to [-1,+1] across the face
    float u = (x / (texture_width - 1.f)) * 2 - 1;
    float v = (y / (texture_width - 1.f)) * 2 - 1;
    switch( face ) {
        case 0:  return vec3( u, -1, -v);
        case 1:  return vec3(-v, -u,  1);
        case 2:  return vec3(-1, -u, -v);
        case 3:  return vec3(-u,  1, -v);
        case 4:  return vec3( v, -u, -1);
        default: return vec3( 1,  u, -v);
    }
}
// project normal in a sphere as 2d texcoord
static
// Map a direction vector to equirectangular texcoords: u in [0,2] (longitude
// over a 2:1 panorama, scaled by height later), v in [0,1] (latitude).
vec2 polar2uv(vec3 n) {
n = norm3(n);
float theta = atan2(n.y, n.x);           // longitude, [-pi, pi]
float phi = atan2(n.z, hypot(n.x, n.y)); // latitude,  [-pi/2, pi/2]
float u = (theta + C_PI) / C_PI;
float v = (C_PI/2 - phi) / C_PI;
return vec2(u, v);
}
// equirectangular panorama (2:1) to cubemap - in RGB, out RGB
static
// Convert a 2:1 equirectangular panorama into 6 cube faces. Input RGB, output
// RGB. Caller owns the six output images (release with image_destroy()).
// Fix: this variant allocates IMAGE_RGB (3 bytes/pixel) but previously wrote
// one uint32 per pixel through pixels32, overflowing each face's buffer by
// width*width bytes. Write exactly 3 bytes per pixel instead.
void panorama2cubemap_(image_t out[6], const image_t in, int width){
    int face;
    #pragma omp parallel for
    for( face = 0; face < 6; ++face ) {
        out[face] = image_create(width, width, IMAGE_RGB);
        for (int j=0; j < width; ++j) {
            uint8_t *line = &out[ face ].pixels8[ (0 + j * width) * 3 ];
            for (int i=0; i < width; ++i) {
                vec3 polar = cubemap2polar(face, i, j, width);
                vec2 uv = polar2uv(polar);
                uv = scale2(uv, in.h-1); // source coords (assumes 2:1, 2*h == w)
                vec3 rgb = bilinear(in, uv);
                line[i*3+0] = rgb.x;
                line[i*3+1] = rgb.y;
                line[i*3+2] = rgb.z;
            }
        }
    }
}
// equirectangular panorama (2:1) to cubemap - in RGB, out RGBA
// equirectangular panorama (2:1) to cubemap - in RGB, out RGBA
// Each face is sampled by projecting its texels onto the sphere and
// bilinearly fetching the panorama. Caller owns the six output images.
void panorama2cubemap(image_t out[6], const image_t in, int width) {
int face;
#pragma omp parallel for
for( face = 0; face < 6; ++face ) {
out[face] = image_create(width, width, IMAGE_RGBA);
for (int j=0; j < width; ++j) {
uint32_t *line = &out[ face ].pixels32[ 0 + j * width ];
for (int i=0; i < width; ++i) {
vec3 polar = cubemap2polar(face, i, j, width);
vec2 uv = polar2uv(polar);
uv = scale2(uv, in.h-1); // source coords (assumes 2:1, 2*h == w)
vec3 rgb = bilinear(in, uv);
// pack one RGBA texel; float channels truncate to uint8
union color {
struct { uint8_t r,g,b,a; };
uint32_t rgba;
} c = { rgb.x, rgb.y, rgb.z, 255 };
line[i] = c.rgba;
}
}
}
}
// Upload 6 face images (+X,-X,+Y,-Y,+Z,-Z order) as a GL cubemap and compute
// its 2nd-order spherical-harmonics irradiance coefficients (c.sh[0..8]) by
// sparsely sampling every `step`-th texel of each face (@ands).
cubemap_t cubemap6( const image_t images[6], int flags ) {
cubemap_t c = {0}, z = {0};
glGenTextures(1, &c.id);
glBindTexture(GL_TEXTURE_CUBE_MAP, c.id);
int samples = 0;
for (int i = 0; i < 6; i++) {
image_t img = images[i]; //image(textures[i], IMAGE_RGB);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB, img.w, img.h, 0, img.n == 3 ? GL_RGB : GL_RGBA, GL_UNSIGNED_BYTE, img.pixels);
// calculate SH coefficients (@ands)
// per-face basis: outward direction plus the two in-face axes
const vec3 skyDir[] = {{ 1, 0, 0},{-1, 0, 0},{ 0, 1, 0},{ 0,-1, 0},{ 0, 0, 1},{ 0, 0,-1}};
const vec3 skyX[]   = {{ 0, 0,-1},{ 0, 0, 1},{ 1, 0, 0},{ 1, 0, 0},{ 1, 0, 0},{-1, 0, 0}};
const vec3 skyY[]   = {{ 0, 1, 0},{ 0, 1, 0},{ 0, 0,-1},{ 0, 0, 1},{ 0, 1, 0},{ 0, 1, 0}};
int step = 16; // sample every 16th texel in x and y
for (int y = 0; y < img.h; y += step) {
unsigned char *p = (unsigned char*)img.pixels + y * img.w * img.n;
for (int x = 0; x < img.w; x += step) {
vec3 n = add3(
add3(
scale3(skyX[i],  2.0f * (x / (img.w - 1.0f)) - 1.0f),
scale3(skyY[i], -2.0f * (y / (img.h - 1.0f)) + 1.0f)),
skyDir[i]); // texelDirection;
float l = len3(n);
// 1/l^3 approximates the texel's solid angle weight
vec3 light = div3(vec3(p[0], p[1], p[2]), 255.0f * l * l * l); // texelSolidAngle * texel_radiance;
n = norm3(n);
// accumulate the 9 SH basis projections
c.sh[0] = add3(c.sh[0], scale3(light,  0.282095f));
c.sh[1] = add3(c.sh[1], scale3(light, -0.488603f * n.y * 2.0 / 3.0));
c.sh[2] = add3(c.sh[2], scale3(light,  0.488603f * n.z * 2.0 / 3.0));
c.sh[3] = add3(c.sh[3], scale3(light, -0.488603f * n.x * 2.0 / 3.0));
c.sh[4] = add3(c.sh[4], scale3(light,  1.092548f * n.x * n.y / 4.0));
c.sh[5] = add3(c.sh[5], scale3(light, -1.092548f * n.y * n.z / 4.0));
c.sh[6] = add3(c.sh[6], scale3(light,  0.315392f * (3.0f * n.z * n.z - 1.0f) / 4.0));
c.sh[7] = add3(c.sh[7], scale3(light, -1.092548f * n.x * n.z / 4.0));
c.sh[8] = add3(c.sh[8], scale3(light,  0.546274f * (n.x * n.x - n.y * n.y) / 4.0));
p += img.n * step;
samples++;
}
}
}
// normalize by sample count; 32.f factor matches the sampling density above
// NOTE(review): 32.f appears to be an empirical scale — confirm if step changes
for (int s = 0; s < 9; s++) {
c.sh[s] = scale3(c.sh[s], 32.f / samples);
}
if( glGenerateMipmap )
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, glGenerateMipmap ? GL_LINEAR_MIPMAP_LINEAR : GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glBindTexture(GL_TEXTURE_CUBE_MAP, 0);
return c;
}
// Build a cubemap (texture + SH coefficients) from a 2:1 equirectangular
// RGBA panorama: project to 6 faces, reorder them into GL face order, upload.
cubemap_t cubemap( const image_t in, int flags ) {
ASSERT( in.n == 4 );
image_t out[6];
panorama2cubemap(out, in, in.h);
// reorder projected faces into cubemap6()'s +X,-X,+Y,-Y,+Z,-Z expectation
image_t swap[6] = { out[0],out[3],out[1],out[4],out[2],out[5] };
cubemap_t c = cubemap6(swap, flags);
int i;
#pragma omp parallel for
for( i = 0; i < 6; ++i) image_destroy(&out[i]); // faces were uploaded; free CPU copies
return c;
}
// Releases the GL texture behind a cubemap. The SH coefficients are kept:
// callers may still want to read them after the texture is gone.
void cubemap_destroy(cubemap_t *c) {
    glDeleteTextures(1, &c->id);
    c->id = 0;
}
// last cubemap pushed via skybox_push_state(); mesh_push_state() reads its SH coefficients
static cubemap_t *last_cubemap;
// returns the most recently activated cubemap (may be NULL before any skybox is pushed)
cubemap_t* cubemap_get_active() {
return last_cubemap;
}
// -----------------------------------------------------------------------------
// skyboxes
// Creates a skybox: a unit-cube mesh plus either a cubemap shader (when an
// asset or explicit flags are given) or a procedural rayleigh-sky shader.
// `asset` may be a single panorama file or a folder holding six cube faces.
skybox_t skybox(const char *asset, int flags) {
    skybox_t sky = {0};

    // sky mesh: unit cube encoded as one 14-index triangle strip
    vec3 corners[] = {{+1,-1,+1},{+1,+1,+1},{+1,+1,-1},{-1,+1,-1},{+1,-1,-1},{-1,-1,-1},{-1,-1,+1},{-1,+1,+1}};
    unsigned strip[] = { 0, 1, 2, 3, 4, 5, 6, 3, 7, 1, 6, 0, 4, 2 };
    sky.geometry = mesh_create("p3", 0,countof(corners),corners, countof(strip),strip, MESH_TRIANGLE_STRIP);

    // sky program: cubemap sampling when flags/asset present, rayleigh otherwise
    sky.flags = flags ? flags : !!asset;
    sky.program = shader(vs_3_3_skybox,
        sky.flags ? fs_3_4_skybox : fs_3_4_skybox_rayleigh,
        "att_position", "fragcolor");

    // sky cubemap & SH coefficients
    if( asset ) {
        if( vfs_size( asset ) ) {
            // asset resolves to a single file: treat it as an equirectangular panorama
            stbi_hdr_to_ldr_gamma(1.2f);
            image_t panorama = image( asset, IMAGE_RGBA );
            sky.cubemap = cubemap( panorama, 0 ); // RGBA required
            image_destroy(&panorama);
        } else {
            // otherwise: a folder containing the six individual cube faces
            image_t images[6] = {0};
            images[0] = image( stringf("%s/posx", asset), IMAGE_RGB ); // +x
            images[1] = image( stringf("%s/negx", asset), IMAGE_RGB ); // -x
            images[2] = image( stringf("%s/posy", asset), IMAGE_RGB ); // +y
            images[3] = image( stringf("%s/negy", asset), IMAGE_RGB ); // -y
            images[4] = image( stringf("%s/posz", asset), IMAGE_RGB ); // +z
            images[5] = image( stringf("%s/negz", asset), IMAGE_RGB ); // -z
            sky.cubemap = cubemap6( images, 0 );
            for( int i = 0; i < countof(images); ++i ) image_destroy(&images[i]);
        }
    }
    return sky;
}
// Binds the sky program/cubemap and sets GL state for background rendering.
// Also publishes the cubemap so later mesh draws can pick up its SH lighting.
int skybox_push_state(skybox_t *sky, mat44 proj, mat44 view) {
    last_cubemap = &sky->cubemap;

    // sky is drawn behind everything; depth writes/tests are not needed
    glDepthFunc(GL_LEQUAL);
    glDisable(GL_DEPTH_TEST);

    shader_bind(sky->program);
    mat44 projview; multiply44x2(projview, proj, view);
    shader_mat44("u_mvp", projview);
    if( sky->flags ) {
        shader_cubemap("u_cubemap", sky->cubemap.id);
    }
    return 0; // @fixme: return sortable hash here?
}
// Counterpart of skybox_push_state(); currently nothing needs restoring.
int skybox_pop_state(skybox_t *sky) {
    (void)sky;
    return 0;
}
// Releases every resource owned by a skybox (mesh, cubemap, program).
void skybox_destroy(skybox_t *sky) {
    mesh_destroy(&sky->geometry);
    cubemap_destroy(&sky->cubemap);
    glDeleteProgram(sky->program);
}
// -----------------------------------------------------------------------------
// meshes
// Convenience constructor: zero-initializes a mesh and uploads its data.
// See mesh_upgrade() for the meaning of `format` and the other parameters.
mesh_t mesh_create(const char *format, int vertex_stride,int vertex_count,const void *vertex_data, int index_count,const void *index_data, int flags) {
    mesh_t out = {0};
    mesh_upgrade(&out, format, vertex_stride,vertex_count,vertex_data, index_count,index_data, flags);
    return out;
}
// Uploads (or re-uploads) vertex/index data into a mesh and configures its VAO.
// `format` is a tiny DSL: each space-separated token is [count][type][semantic],
// e.g. "p3 n3 t2" = position float3, normal float3, texcoord float2.
//   digits 0-4 -> component count; 'f' float, 'u'/'i' uint, 'b' ubyte (when after a digit)
//   '*' -> flips the normalized flag for that attribute
// Pass vertex_stride=0 to use the stride computed from `format`.
void mesh_upgrade(mesh_t *m, const char *format, int vertex_stride,int vertex_count,const void *vertex_data, int index_count,const void *index_data, int flags) {
m->flags = flags;
// setup
unsigned sizeof_index = sizeof(GLuint);
unsigned sizeof_vertex = 0;
m->index_count = index_count;
m->vertex_count = vertex_count;
// iterate vertex attributes { position, normal + uv + tangent + bitangent + ... }
struct vertex_descriptor {
int vertex_type, num_attribute, num_components, alt_normalized;
int stride, offset;
} descriptor[16] = {0}, *dc = &descriptor[0];
// parse the format string, one descriptor per token; ' ' and '\0' close a token
do switch( *format ) {
break; case '*': dc->alt_normalized = 1;
break; case '0': dc->num_components = 0;
break; case '1': dc->num_components = 1;
break; case '2': dc->num_components = 2;
break; case '3': dc->num_components = 3;
break; case '4': dc->num_components = 4;
break; case 'f': dc->vertex_type = GL_FLOAT;
break; case 'u': case 'i': dc->vertex_type = GL_UNSIGNED_INT;
// 'b' means ubyte only right after a digit; otherwise it is the 'bitangent' semantic letter
break; case 'b': if(format[-1] >= '0' && format[-1] <= '9') dc->vertex_type = GL_UNSIGNED_BYTE; //else bitangent.
// note: the ' ' case deliberately falls through into '\0' to finalize the descriptor
break; case ' ': while (format[1] == ' ') format++; case '\0':
if (!dc->vertex_type) dc->vertex_type = GL_FLOAT;
dc->offset = sizeof_vertex;
sizeof_vertex += (dc->stride = dc->num_components * (dc->vertex_type == GL_UNSIGNED_BYTE ? 1 : 4));
++dc;
break; default: if( !strchr("pntcwai", *format) ) PANIC("unsupported vertex type '%c'", *format);
} while (*format++);
// caller-provided stride wins over the computed one
if(vertex_stride > 0) sizeof_vertex = vertex_stride;
// layout
if(!m->vao) glGenVertexArrays(1, &m->vao);
glBindVertexArray(m->vao);
// index data
if( index_data && index_count ) {
m->index_count = index_count;
if(!m->ibo) glGenBuffers(1, &m->ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m->ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, m->index_count * sizeof_index, index_data, flags & MESH_STREAM ? GL_STREAM_DRAW : GL_STATIC_DRAW);
}
// vertex data
if( vertex_data && vertex_count ) {
m->vertex_count = vertex_count;
if(!m->vbo) glGenBuffers(1, &m->vbo);
glBindBuffer(GL_ARRAY_BUFFER, m->vbo);
glBufferData(GL_ARRAY_BUFFER, m->vertex_count * sizeof_vertex, vertex_data, flags & MESH_STREAM ? GL_STREAM_DRAW : GL_STATIC_DRAW);
}
for( int i = 0; i < 8; ++i ) {
// glDisableVertexAttribArray(i);
}
// vertex setup: iterate descriptors
for( int i = 0; i < countof(descriptor); ++i ) {
if( descriptor[i].num_components ) {
glDisableVertexAttribArray(i);
// normalized = (ubyte default) XOR (the '*' modifier)
glVertexAttribPointer(i,
descriptor[i].num_components, descriptor[i].vertex_type, (descriptor[i].vertex_type == GL_UNSIGNED_BYTE ? GL_TRUE : GL_FALSE) ^ (descriptor[i].alt_normalized ? GL_TRUE : GL_FALSE),
sizeof_vertex, (GLchar*)NULL + descriptor[i].offset);
glEnableVertexAttribArray(i);
} else {
glDisableVertexAttribArray(i);
}
}
glBindVertexArray(0);
}
// Counterpart of mesh_push_state(); intentionally a no-op for now.
void mesh_pop_state(mesh_t *sm) {
}
void mesh_push_state(mesh_t *sm, unsigned program, unsigned texture_id, float model[16], float view[16], float proj[16], unsigned billboard) {
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glActiveTexture(GL_TEXTURE0);
shader_bind(program);
mat44 mv; multiply44x2(mv, view, model);
if( billboard ) {
float d = sqrt(mv[4*0+0] * mv[4*0+0] + mv[4*1+1] * mv[4*1+1] + mv[4*2+2] * mv[4*2+2]);
if(billboard & 4) mv[4*0+0] = d, mv[4*0+1] = 0, mv[4*0+2] = 0;
if(billboard & 2) mv[4*1+0] = 0, mv[4*1+1] = d, mv[4*1+2] = 0;
if(billboard & 1) mv[4*2+0] = 0, mv[4*2+1] = 0, mv[4*2+2] = d;
}
mat44 mvp; multiply44x2(mvp, proj, mv); // multiply44x3(mvp, proj, view, model);
shader_mat44("u_mvp", mvp);
if (cubemap_get_active()) {
GLuint uniform_loc = glGetUniformLocation(program, "u_coefficients_sh");
glUniform3fv(uniform_loc, 9, &cubemap_get_active()->sh[0].x);
}
shader_texture("u_texture2d", texture_id, 0);
}
// Issues the draw call for a mesh: indexed when an IBO exists, plain arrays
// otherwise. Primitive type comes from the MESH_TRIANGLE_STRIP flag.
void mesh_render(mesh_t *sm) {
    GLenum primitive = sm->flags & MESH_TRIANGLE_STRIP ? GL_TRIANGLE_STRIP : GL_TRIANGLES;
    glBindVertexArray(sm->vao);
    if( sm->ibo ) {
        // indexed path
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, sm->ibo); // <-- why intel?
        glDrawElements(primitive, sm->index_count, GL_UNSIGNED_INT, (char*)0);
        profile_incstat("drawcalls", +1);
        profile_incstat("triangles", sm->index_count/3);
    } else {
        // vertices-only path
        glDrawArrays(primitive, 0, sm->vertex_count);
        profile_incstat("drawcalls", +1);
        profile_incstat("triangles", sm->vertex_count/3);
    }
}
// Releases the GL objects owned by a mesh. Previously an empty @todo stub,
// which leaked the VAO/VBO/IBO allocated in mesh_upgrade().
// Handles are zeroed so the mesh can be safely re-upgraded or destroyed twice.
void mesh_destroy(mesh_t *m) {
    if( m->vbo ) glDeleteBuffers(1, &m->vbo), m->vbo = 0;
    if( m->ibo ) glDeleteBuffers(1, &m->ibo), m->ibo = 0;
    if( m->vao ) glDeleteVertexArrays(1, &m->vao), m->vao = 0;
    m->vertex_count = 0;
    m->index_count = 0;
}
// -----------------------------------------------------------------------------
// screenshots
// Captures the framebuffer into a persistent CPU buffer and returns it.
// n selects the format: 3 RGB, 4 RGBA, -3 BGR, -4 BGRA.
// NOTE(review): n is declared unsigned, so `n == -3` and `abs(n)` rely on the
// negative literals wrapping to the same unsigned value — confirm intent.
void* screenshot( unsigned n ) { // 3 RGB, 4 RGBA, -3 BGR, -4 BGRA
int w = window_width(), h = window_height();
int mode = n == 3 ? GL_RGB : n == -3 ? GL_BGR : n == 4 ? GL_RGBA : GL_BGRA;
// one grow-only pixel buffer reused across calls (4 bytes/px worst case)
static local uint8_t *pixels = 0;
pixels = (uint8_t*)REALLOC(pixels, w * h * 4 );
#if 0
// sync, 10 ms
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0); // disable any pbo, in case somebody did for us
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glReadBuffer(GL_FRONT);
glReadPixels(0, 0, w, h, mode, GL_UNSIGNED_BYTE, pixels);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
return pixels;
#else
// async: a ring of pixel-buffer-objects; reads are kicked off now and mapped
// NUM_PBOS frames later, so the result lags but the GPU never stalls.
enum { NUM_PBOS = 16 };
static local GLuint pbo[NUM_PBOS] = {0}, lastw, lasth;
static local int frame = 0, bound = 0;
// (re)create the PBO ring whenever the window size changes
if( lastw != w || lasth != h ) {
lastw = w, lasth = h;
frame = 0;
bound = 0;
// @fixme: delete previous pbos
for( int i = 0; i < NUM_PBOS; ++i ) {
glGenBuffers(1, &pbo[i]);
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[i]);
glBufferData(GL_PIXEL_PACK_BUFFER, w * h * 4, NULL, GL_STREAM_READ); // GL_STATIC_READ);
}
}
if (frame < NUM_PBOS) {
// do setup during initial frames
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[bound]);
glReadPixels(0, 0, w, h, mode, GL_UNSIGNED_BYTE, (GLvoid*)((GLchar*)NULL+0));
} else {
// read from oldest bound pbo
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[bound]);
void *ptr = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
memcpy(pixels, ptr, w * h * abs(n));
glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
// trigger next read
glReadPixels(0, 0, w, h, mode, GL_UNSIGNED_BYTE, (GLvoid*)((GLchar*)NULL+0));
}
bound = (bound + 1) % NUM_PBOS;
// frame counts 0..NUM_PBOS during warm-up, then is negated to -NUM_PBOS
// NOTE(review): -NUM_PBOS still satisfies `frame < NUM_PBOS`, which makes the
// map/read branch above look unreachable after warm-up — verify this logic.
frame += frame >= 0 && frame < NUM_PBOS;
frame *= frame == NUM_PBOS ? -1 : +1;
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
return pixels;
#endif
}
// -----------------------------------------------------------------------------
// viewports
// Sets the framebuffer clear color (alpha pinned to 1).
void viewport_color(vec3 color3) {
    glClearColor(color3.x, color3.y, color3.z, 1);
}
// Clears the selected buffers; depth resets to 1, stencil to 0.
void viewport_clear(bool color, bool depth) {
    glClearDepthf(1);
    glClearStencil(0);
    GLbitfield bits = 0;
    if( color ) bits |= GL_COLOR_BUFFER_BIT;
    if( depth ) bits |= GL_DEPTH_BUFFER_BIT;
    glClear(bits);
}
// Sets viewport and scissor to the rect [from..to], converting from the
// window's top-left origin to GL's bottom-left origin.
void viewport_clip(vec2 from, vec2 to) {
    float w = to.x - from.x;
    float h = to.y - from.y;
    float x = from.x;
    float y = window_height() - from.y - h;
    glViewport(x, y, w, h);
    glScissor(x, y, w, h);
}
// -----------------------------------------------------------------------------
// fbos
// Creates a framebuffer object with optional color and depth texture
// attachments. Non-zero `flags` disables the draw/read buffers (useful for
// depth-only shadow FBOs). PANICs if the FBO is incomplete.
unsigned fbo(unsigned color_texture_id, unsigned depth_texture_id, int flags) {
GLuint fbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
if( color_texture_id ) glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, color_texture_id, 0);
if( depth_texture_id ) glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depth_texture_id, 0);
#if 0 // this is working; it's just not enabled for now
// NOTE(review): this disabled branch references `color.width`/`color.height`,
// which do not exist in this scope — it would not compile if re-enabled.
else {
// create a non-sampleable renderbuffer object for depth and stencil attachments
unsigned int rbo;
glGenRenderbuffers(1, &rbo);
glBindRenderbuffer(GL_RENDERBUFFER, rbo);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, color.width, color.height); // use a single renderbuffer object for both a depth AND stencil buffer.
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, rbo); // now actually attach it
}
#endif
// depth-only FBOs must not have draw/read color buffers
if(flags) glDrawBuffer(GL_NONE);
if(flags) glReadBuffer(GL_NONE);
#if 1
GLenum result = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if( GL_FRAMEBUFFER_COMPLETE != result ) {
PANIC("ERROR: Framebuffer not complete.");
}
#else
// verbose variant: reports the specific incompleteness reason
switch (glCheckFramebufferStatus(GL_FRAMEBUFFER)) {
case GL_FRAMEBUFFER_COMPLETE: break;
case GL_FRAMEBUFFER_UNDEFINED: PANIC("GL_FRAMEBUFFER_UNDEFINED");
case GL_FRAMEBUFFER_UNSUPPORTED: PANIC("GL_FRAMEBUFFER_UNSUPPORTED");
case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT: PANIC("GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT");
case GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER: PANIC("GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER");
case GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER: PANIC("GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER");
case GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE: PANIC("GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE");
// case GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT: PANIC("GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT");
case GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS: PANIC("GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS");
// case GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT: PANIC("GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT");
case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT: PANIC("GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT");
default: PANIC("ERROR: Framebuffer not complete. glCheckFramebufferStatus returned %x", glCheckFramebufferStatus(GL_FRAMEBUFFER));
}
#endif
glBindFramebuffer (GL_FRAMEBUFFER, 0);
return fbo;
}
// Binds a framebuffer object for both drawing and reading.
void fbo_bind(unsigned id) {
glBindFramebuffer(GL_FRAMEBUFFER, id);
}
// Restores the default (window) framebuffer.
void fbo_unbind() {
fbo_bind(0);
}
// Deletes a framebuffer object; attached textures are owned by the caller.
void fbo_destroy(unsigned id) {
// glDeleteRenderbuffers(1, &renderbuffer);
glDeleteFramebuffers(1, &id);
}
// -----------------------------------------------------------------------------
// post-fxs swapchain
// post-processing chain API: a stack of fullscreen shader passes ping-ponged
// between two offscreen render targets.
typedef struct passfx passfx;
typedef struct postfx postfx;
void postfx_create(postfx *fx, int flags);
void postfx_destroy(postfx *fx);
// compiles a fragment shader into the next free pass slot
bool postfx_load(postfx *fx, const char *name, const char *fragment);
// redirects rendering into the chain; returns false when no pass is active
bool postfx_begin(postfx *fx, int width, int height);
// runs every enabled pass and presents the result
bool postfx_end(postfx *fx);
bool postfx_enabled(postfx *fx, int pass_number);
bool postfx_enable(postfx *fx, int pass_number, bool enabled);
// bool postfx_toggle(postfx *fx, int pass_number);
void postfx_clear(postfx *fx);
char* postfx_name(postfx *fx, int slot);
// one fullscreen post-processing pass
struct passfx {
mesh_t m; // fullscreen quad (only the VAO is used)
char *name; // owned copy of the pass name
unsigned program; // compiled GL shader program
int uniforms[16]; // cached uniform locations, indexed by the u_* enum below
};
// the whole post-processing chain
struct postfx {
// renderbuffers: color & depth textures
unsigned fb[2]; // two FBOs ping-ponged between passes
texture_t diffuse[2], depth[2];
// shader passes
passfx pass[64];
uint64_t mask; // bit i set -> pass i enabled
// global enable flag
bool enabled;
//
int num_loaded; // number of slots filled so far
};
// indices into passfx.uniforms[] — one slot per well-known shader uniform
enum {
u_color, // scene color texture (unit 0)
u_depth, // scene depth texture (unit 1)
u_time,
u_frame,
u_width, u_height,
u_mousex, u_mousey,
u_channelres0x, u_channelres0y, // resolution of channel 0
u_channelres1x, u_channelres1y, // resolution of channel 1
};
// Zero-initializes a postfx chain and turns the global enable on.
// `flags` is currently unused/reserved.
void postfx_create(postfx *fx, int flags) {
    (void)flags;
    postfx zero = {0};
    *fx = zero;
    fx->enabled = 1;
}
// Frees every pass name, releases both render-target sets, and zeroes the struct.
void postfx_destroy( postfx *fx ) {
    for( int i = 0; i < 64; ++i ) {
        FREE(fx->pass[i].name);
    }
    for( int i = 0; i < 2; ++i ) {
        texture_destroy(&fx->diffuse[i]);
        texture_destroy(&fx->depth[i]);
        fbo_destroy(fx->fb[i]);
    }
    postfx zero = {0};
    *fx = zero;
}
// Returns the name of a pass slot; slots wrap modulo 64 (size of pass[]).
char* postfx_name(postfx *fx, int slot) {
    passfx *p = &fx->pass[ slot & 63 ];
    return p->name;
}
// Compiles a fragment shader into the next pass slot. Shadertoy-style shaders
// (those defining mainImage) get a wrapper main() prepended. Caches the
// locations of all well-known uniforms, trying several common names for the
// color/depth samplers so third-party shaders work unmodified.
// NOTE(review): slots wrap via `slot & 63`; loading a 65th pass overwrites
// slot 0 and leaks its old STRDUP'd name — confirm whether that is acceptable.
bool postfx_load_from_mem( postfx *fx, const char *name, const char *fs ) {
if(!fs || !fs[0]) PANIC("!invalid fragment shader");
int slot = fx->num_loaded++;
passfx *p = &fx->pass[ slot & 63 ];
p->name = STRDUP(name);
const char *vs = vs_0_2_fullscreen_quad_B;
// patch fragment: preamble (+ shadertoy wrapper) + user code, into a 64 KiB scratch buffer
char *fs2 = (char*)CALLOC(1, 64*1024);
strcat(fs2, fs_2_4_preamble);
if( strstr(fs, "mainImage") ) {
strcat(fs2, fs_main_shadertoy );
}
strcat(fs2, fs);
p->program = shader(vs, fs2, "vtexcoord", "fragColor" );
FREE(fs2);
glUseProgram(p->program); // needed?
// resolve uniforms; each falls back through several conventional names
for( int i = 0; i < countof(p->uniforms); ++i ) p->uniforms[i] = -1;
if( p->uniforms[u_time] == -1 ) p->uniforms[u_time] = glGetUniformLocation(p->program, "iTime");
if( p->uniforms[u_frame] == -1 ) p->uniforms[u_frame] = glGetUniformLocation(p->program, "iFrame");
if( p->uniforms[u_width] == -1 ) p->uniforms[u_width] = glGetUniformLocation(p->program, "iWidth");
if( p->uniforms[u_height] == -1 ) p->uniforms[u_height] = glGetUniformLocation(p->program, "iHeight");
if( p->uniforms[u_mousex] == -1 ) p->uniforms[u_mousex] = glGetUniformLocation(p->program, "iMousex");
if( p->uniforms[u_mousey] == -1 ) p->uniforms[u_mousey] = glGetUniformLocation(p->program, "iMousey");
if( p->uniforms[u_color] == -1 ) p->uniforms[u_color] = glGetUniformLocation(p->program, "tex");
if( p->uniforms[u_color] == -1 ) p->uniforms[u_color] = glGetUniformLocation(p->program, "tex0");
if( p->uniforms[u_color] == -1 ) p->uniforms[u_color] = glGetUniformLocation(p->program, "tColor");
if( p->uniforms[u_color] == -1 ) p->uniforms[u_color] = glGetUniformLocation(p->program, "tDiffuse");
if( p->uniforms[u_color] == -1 ) p->uniforms[u_color] = glGetUniformLocation(p->program, "iChannel0");
if( p->uniforms[u_depth] == -1 ) p->uniforms[u_depth] = glGetUniformLocation(p->program, "tex1");
if( p->uniforms[u_depth] == -1 ) p->uniforms[u_depth] = glGetUniformLocation(p->program, "tDepth");
if( p->uniforms[u_depth] == -1 ) p->uniforms[u_depth] = glGetUniformLocation(p->program, "iChannel1");
if( p->uniforms[u_channelres0x] == -1 ) p->uniforms[u_channelres0x] = glGetUniformLocation(p->program, "iChannelRes0x");
if( p->uniforms[u_channelres0y] == -1 ) p->uniforms[u_channelres0y] = glGetUniformLocation(p->program, "iChannelRes0y");
if( p->uniforms[u_channelres1x] == -1 ) p->uniforms[u_channelres1x] = glGetUniformLocation(p->program, "iChannelRes1x");
if( p->uniforms[u_channelres1y] == -1 ) p->uniforms[u_channelres1y] = glGetUniformLocation(p->program, "iChannelRes1y");
// set quad: an attribute-less VAO; the vertex shader synthesizes the fullscreen triangle/quad
glGenVertexArrays(1, &p->m.vao);
return true;
}
// Population count: number of set bits in x.
// Uses Kernighan's loop — each iteration clears the lowest set bit,
// so it runs once per set bit.
uint64_t postfx_count_ones(uint64_t x) {
    uint64_t n = 0;
    for( ; x; x &= x - 1 ) {
        ++n;
    }
    return n;
}
// Sets or clears the enable bit of one pass, then refreshes the global
// enabled flag (true while any pass remains enabled). Returns the new flag.
bool postfx_enable(postfx *fx, int pass, bool enabled) {
    uint64_t bit = 1ull << pass;
    if( enabled ) fx->mask |= bit;
    else fx->mask &= ~bit;
    fx->enabled = !!postfx_count_ones(fx->mask);
    return fx->enabled;
}
// True when the given pass has its enable bit set.
bool postfx_enabled(postfx *fx, int pass) {
    return (fx->mask >> pass) & 1ull;
}
// Flips a pass on/off; returns the chain's new global enabled flag.
bool postfx_toggle(postfx *fx, int pass) {
    bool flipped = !postfx_enabled(fx, pass);
    return postfx_enable(fx, pass, flipped);
}
// Disables every pass and the chain as a whole.
void postfx_clear(postfx *fx) {
    fx->mask = 0;
    fx->enabled = 0;
}
// Starts capturing the frame into the postfx chain. (Re)allocates both
// ping-pong render targets when the size changed. Returns false (and leaves
// the default framebuffer bound) when no pass is enabled; otherwise clears
// both targets and leaves fb[0] bound for scene rendering.
bool postfx_begin(postfx *fx, int width, int height) {
// guard against zero-sized viewports
width += !width;
height += !height;
// resize if needed
if( fx->diffuse[0].w != width || fx->diffuse[0].h != height ) {
texture_destroy(&fx->diffuse[0]);
texture_destroy(&fx->diffuse[1]);
texture_destroy(&fx->depth[0]);
texture_destroy(&fx->depth[1]);
fbo_destroy(fx->fb[0]);
fbo_destroy(fx->fb[1]);
// create texture, set texture parameters and content
fx->diffuse[0] = texture_create(width, height, 4, NULL, TEXTURE_RGBA);
fx->depth[0] = texture_create(width, height, 1, NULL, TEXTURE_DEPTH|TEXTURE_FLOAT);
fx->fb[0] = fbo(fx->diffuse[0].id, fx->depth[0].id, 0);
// create texture, set texture parameters and content
fx->diffuse[1] = texture_create(width, height, 4, NULL, TEXTURE_RGBA);
fx->depth[1] = texture_create(width, height, 1, NULL, TEXTURE_DEPTH|TEXTURE_FLOAT);
fx->fb[1] = fbo(fx->diffuse[1].id, fx->depth[1].id, 0);
}
uint64_t num_active_passes = postfx_count_ones(fx->mask);
bool active = fx->enabled && num_active_passes;
if( !active ) {
fbo_unbind();
return false;
}
// clear both targets; fb[0] stays bound as the scene's render target
fbo_bind(fx->fb[1]);
viewport_clear(true, true);
viewport_clip(vec2(0,0), vec2(width, height));
fbo_bind(fx->fb[0]);
viewport_clear(true, true);
viewport_clip(vec2(0,0), vec2(width, height));
return true;
}
// Runs every enabled pass over the captured frame, ping-ponging between the
// two render targets; the final pass draws to the default framebuffer.
// Returns false (no-op) when the chain is inactive.
bool postfx_end(postfx *fx) {
uint64_t num_active_passes = postfx_count_ones(fx->mask);
bool active = fx->enabled && num_active_passes;
if( !active ) {
return false;
}
fbo_unbind();
// disable depth test in 2d rendering
glDisable(GL_DEPTH_TEST);
// `frame` indexes which of the two targets holds the current input image
int frame = 0;
float t = time_ms() / 1000.f;
float w = fx->diffuse[0].w;
float h = fx->diffuse[0].h;
float mx = input(MOUSE_X);
float my = input(MOUSE_Y);
for(int i = 0, e = countof(fx->pass); i < e; ++i) {
if( fx->mask & (1ull << i) ) {
passfx *pass = &fx->pass[i];
if( !pass->program ) { --num_active_passes; continue; }
glUseProgram(pass->program);
// bind texture to texture unit 0
// shader_texture(fx->diffuse[frame], 0);
glActiveTexture(GL_TEXTURE0 + 0); glBindTexture(GL_TEXTURE_2D, fx->diffuse[frame].id);
glUniform1i(pass->uniforms[u_color], 0);
glUniform1f(pass->uniforms[u_channelres0x], fx->diffuse[frame].w);
glUniform1f(pass->uniforms[u_channelres0y], fx->diffuse[frame].h);
// bind depth to texture unit 1
// shader_texture(fx->depth[frame], 1);
glActiveTexture(GL_TEXTURE0 + 1); glBindTexture(GL_TEXTURE_2D, fx->depth[frame].id);
glUniform1i(pass->uniforms[u_depth], 1);
// bind uniforms
static unsigned f = 0; ++f;
glUniform1f(pass->uniforms[u_time], t);
glUniform1f(pass->uniforms[u_frame], f-1);
glUniform1f(pass->uniforms[u_width], w);
glUniform1f(pass->uniforms[u_height], h);
glUniform1f(pass->uniforms[u_mousex], mx);
glUniform1f(pass->uniforms[u_mousey], my);
// bind the vao
// `bound` > 0 means more passes follow: render into the other target;
// when it reaches 0 this is the last pass and we draw to the screen.
int bound = --num_active_passes;
if( bound ) fbo_bind(fx->fb[frame ^= 1]);
// fullscreen quad
glBindVertexArray(pass->m.vao);
glDrawArrays(GL_TRIANGLES, 0, 6);
profile_incstat("drawcalls", +1);
profile_incstat("triangles", +2);
glBindVertexArray(0);
if( bound ) fbo_unbind();
else glUseProgram(0);
}
}
return true;
}
// single global postfx chain shared by all fx_* convenience wrappers below
static postfx fx;
// compiles `content` into a new pass; the chain is created on first use
void fx_load_from_mem(const char *nameid, const char *content) {
do_once postfx_create(&fx, 0);
postfx_load_from_mem(&fx, nameid, content);
}
// loads a pass from a vfs file
// NOTE(review): unlike fx_load_from_mem this does not do_once-create the
// chain; it relies on static zero-init of `fx` — confirm that is intended.
void fx_load(const char *file) {
postfx_load_from_mem(&fx, file_name(file), vfs_read(file));
}
// begins capturing the frame at the current window size
void fx_begin() {
postfx_begin(&fx, window_width(), window_height());
}
// runs the enabled passes and presents the result
void fx_end() {
postfx_end(&fx);
}
int fx_enabled(int pass) {
return postfx_enabled(&fx, pass);
}
void fx_enable(int pass, int enabled) {
postfx_enable(&fx, pass, enabled);
}
// toggles every loaded pass at once
void fx_enable_all(int enabled) {
for( int i = 0; i < fx.num_loaded; ++i ) fx_enable(i, enabled);
}
char *fx_name(int pass) {
return postfx_name(&fx, pass);
}
// -----------------------------------------------------------------------------
// skeletal meshes (iqm)
// IQM (Inter-Quake Model) binary format: file magic string and the format
// version this loader understands.
#define IQM_MAGIC "INTERQUAKEMODEL"
#define IQM_VERSION 2
// on-disk IQM file header; every (num, ofs) pair locates a section inside the
// file as a count plus a byte offset from the start of the file
struct iqmheader {
char magic[16]; // must equal IQM_MAGIC
unsigned version; // must equal IQM_VERSION
unsigned filesize;
unsigned flags;
unsigned num_text, ofs_text; // string table (all `name` fields index into it)
unsigned num_meshes, ofs_meshes;
unsigned num_vertexarrays, num_vertexes, ofs_vertexarrays;
unsigned num_triangles, ofs_triangles, ofs_adjacency;
unsigned num_joints, ofs_joints;
unsigned num_poses, ofs_poses;
unsigned num_anims, ofs_anims;
unsigned num_frames, num_framechannels, ofs_frames, ofs_bounds;
unsigned num_comment, ofs_comment;
unsigned num_extensions, ofs_extensions;
};
// one submesh: a contiguous range of vertices and triangles
struct iqmmesh {
unsigned name; // offset into the text section
unsigned material; // offset into the text section
unsigned first_vertex, num_vertexes;
unsigned first_triangle, num_triangles;
};
// vertex array semantic tags (iqmvertexarray.type)
enum {
IQM_POSITION,
IQM_TEXCOORD,
IQM_NORMAL,
IQM_TANGENT,
IQM_BLENDINDEXES,
IQM_BLENDWEIGHTS,
IQM_COLOR,
IQM_CUSTOM = 0x10
};
// vertex array component formats (iqmvertexarray.format)
enum {
IQM_BYTE,
IQM_UBYTE,
IQM_SHORT,
IQM_USHORT,
IQM_INT,
IQM_UINT,
IQM_HALF,
IQM_FLOAT,
IQM_DOUBLE,
};
// triangle: three vertex indices
struct iqmtriangle {
unsigned vertex[3];
};
// per-triangle adjacency: neighboring triangle indices
struct iqmadjacency {
unsigned triangle[3];
};
// skeleton joint with its bind-pose transform
struct iqmjoint {
unsigned name; // offset into the text section
int parent; // parent joint index, < 0 for roots
float translate[3], rotate[4], scale[3];
};
// per-joint animation channel description: each of the 10 channels
// (translate xyz, rotate xyzw, scale xyz) is offset + scale * framedata
struct iqmpose {
int parent;
unsigned mask; // bit n set -> channel n has per-frame data
float channeloffset[10];
float channelscale[10];
};
// one named animation clip: a range of frames plus playback rate
struct iqmanim {
unsigned name; // offset into the text section
unsigned first_frame, num_frames;
float framerate;
unsigned flags;
};
enum {
IQM_LOOP = 1<<0
};
// one raw vertex attribute stream inside the file
struct iqmvertexarray {
unsigned type; // IQM_POSITION, IQM_NORMAL, ...
unsigned flags;
unsigned format; // IQM_FLOAT, IQM_UBYTE, ...
unsigned size; // components per vertex
unsigned offset; // byte offset of the stream in the file
};
// per-frame bounding volume
struct iqmbounds {
union {
struct { float bbmin[3], bbmax[3]; };
struct { vec3 min3, max3; };
aabb box;
};
float xyradius, radius;
};
// -----------------------------------------------------------------------------
// interleaved GPU vertex layout the loader converts the IQM streams into
typedef struct iqm_vertex {
GLfloat position[3];
GLfloat texcoord[2];
GLfloat normal[3];
GLfloat tangent[4];
GLubyte blendindexes[4]; // up to 4 influencing joints
GLubyte blendweights[4]; // normalized weights, sum ~255
GLubyte color[4];
} iqm_vertex;
// runtime state of one loaded IQM model
typedef struct iqm_t {
int nummeshes, numtris, numverts, numjoints, numframes, numanims;
GLuint program;
GLuint vao, ibo, vbo;
GLuint *textures; // one texture per mesh
uint8_t *buf, *meshdata, *animdata; // raw file bytes + section aliases into it
struct iqmmesh *meshes;
struct iqmjoint *joints;
struct iqmpose *poses;
struct iqmanim *anims;
struct iqmbounds *bounds;
mat34 *baseframe, *inversebaseframe, *outframe, *frames; // skinning matrices
GLint bonematsoffset;
} iqm_t;
// shorthand macros: the loader functions below take `iqm_t *q` and use these
// bare names to access its fields.
// NOTE(review): these macros remain defined for the rest of the translation
// unit unless #undef'd later — confirm they are #undef'd after the loader,
// otherwise common identifiers like `program`/`buf` are silently rewritten.
#define program (q->program)
#define meshdata (q->meshdata)
#define animdata (q->animdata)
#define nummeshes (q->nummeshes)
#define numtris (q->numtris)
#define numverts (q->numverts)
#define numjoints (q->numjoints)
#define numframes (q->numframes)
#define numanims (q->numanims)
#define meshes (q->meshes)
#define textures (q->textures)
#define joints (q->joints)
#define poses (q->poses)
#define anims (q->anims)
#define baseframe (q->baseframe)
#define inversebaseframe (q->inversebaseframe)
#define outframe (q->outframe)
#define frames (q->frames)
#define vao (q->vao)
#define ibo (q->ibo)
#define vbo (q->vbo)
#define bonematsoffset (q->bonematsoffset)
#define buf (q->buf)
#define bounds (q->bounds)
// Uploads the standard matrices (MVP, M, V, P) and skinning data to whatever
// uniform names the given shader happens to use; each matrix tries a pair of
// conventional names (e.g. "M" then "model") and uploads to the first found.
static
void model_set_uniforms(model_t m, int shader, mat44 proj, mat44 view, mat44 model) {
if(!m.iqm) return;
iqm_t *q = m.iqm;
glUseProgram(shader);
int loc;
//if( (loc = glGetUniformLocation(shader, "M")) >= 0 ) glUniformMatrix4fv( loc, 1, GL_FALSE/*GL_TRUE*/, m); // RIM
if( (loc = glGetUniformLocation(shader, "MVP")) >= 0 ) {
mat44 mvp; multiply44x3(mvp, proj, view, model);
glUniformMatrix4fv( loc, 1, GL_FALSE/*GL_TRUE*/, mvp);
}
else
if( (loc = glGetUniformLocation(shader, "u_mvp")) >= 0 ) {
mat44 mvp; multiply44x3(mvp, proj, view, model);
glUniformMatrix4fv( loc, 1, GL_FALSE/*GL_TRUE*/, mvp);
}
#if 0
// @todo: mat44 projview
#endif
if ((loc = glGetUniformLocation(shader, "M")) >= 0) {
glUniformMatrix4fv(loc, 1, GL_FALSE/*GL_TRUE*/, model);
}
else
if ((loc = glGetUniformLocation(shader, "model")) >= 0) {
glUniformMatrix4fv(loc, 1, GL_FALSE/*GL_TRUE*/, model);
}
if ((loc = glGetUniformLocation(shader, "V")) >= 0) {
glUniformMatrix4fv(loc, 1, GL_FALSE/*GL_TRUE*/, view);
}
else
if ((loc = glGetUniformLocation(shader, "view")) >= 0) {
glUniformMatrix4fv(loc, 1, GL_FALSE/*GL_TRUE*/, view);
}
if ((loc = glGetUniformLocation(shader, "P")) >= 0) {
glUniformMatrix4fv(loc, 1, GL_FALSE/*GL_TRUE*/, proj);
}
else
if ((loc = glGetUniformLocation(shader, "proj")) >= 0) {
glUniformMatrix4fv(loc, 1, GL_FALSE/*GL_TRUE*/, proj);
}
// skinning: flag + bone matrices (3x4, one per joint) when animations exist
if( (loc = glGetUniformLocation(shader, "SKINNED")) >= 0 ) glUniform1i( loc, numanims ? GL_TRUE : GL_FALSE);
if( numanims )
if( (loc = glGetUniformLocation(shader, "vsBoneMatrix")) >= 0 ) glUniformMatrix3x4fv( loc, numjoints, GL_FALSE, outframe[0]);
}
// Binds the model's VAO/buffers and (re)declares the iqm_vertex attribute
// layout: 0 position, 1 texcoord, 2 normal, 3 tangent, plus 4 blendindexes
// and 5 blendweights when the model is animated.
static
void model_set_state(model_t m) {
if(!m.iqm) return;
iqm_t *q = m.iqm;
glBindVertexArray( vao );
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(iqm_vertex), (GLvoid*)offsetof(iqm_vertex, position) );
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(iqm_vertex), (GLvoid*)offsetof(iqm_vertex, texcoord) );
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, sizeof(iqm_vertex), (GLvoid*)offsetof(iqm_vertex, normal) );
glVertexAttribPointer(3, 4, GL_FLOAT, GL_FALSE, sizeof(iqm_vertex), (GLvoid*)offsetof(iqm_vertex, tangent) );
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glEnableVertexAttribArray(3);
// skinning attributes: indexes stay raw bytes, weights normalize to [0,1]
if(numframes > 0) {
glVertexAttribPointer(4, 4, GL_UNSIGNED_BYTE, GL_FALSE, sizeof(iqm_vertex), (GLvoid*)offsetof(iqm_vertex,blendindexes) );
glVertexAttribPointer(5, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(iqm_vertex), (GLvoid*)offsetof(iqm_vertex,blendweights) );
glEnableVertexAttribArray(4);
glEnableVertexAttribArray(5);
}
// 6 color
// 7 bitangent? into texcoord.z?
// [draw]
#if 0
glDisableVertexAttribArray(1);
if(numframes > 0) {
glDisableVertexAttribArray(4);
glDisableVertexAttribArray(5);
}
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
#endif
glBindVertexArray( 0 );
}
// Parses the mesh sections of an IQM file (vertex arrays, triangles, meshes,
// joints, bounds), builds the bind pose / inverse bind pose matrices, and
// uploads interleaved vertex + index data to GL buffers.
// Returns false when mesh data was already loaded or a vertex stream has an
// unexpected format/size.
// Fix: the stream-validation failures used `return PANIC("!"); false;` — the
// `false;` was a dead statement and the function returned PANIC's value
// (truthy) on failure. The intended comma-operator idiom `return PANIC(..),
// false;` (as used elsewhere in this file) is restored below.
static
bool model_load_meshes(iqm_t *q, const struct iqmheader *hdr) {
    if(meshdata) return false;

    // byte-swap the little-endian sections in place
    lil32p(&buf[hdr->ofs_vertexarrays], hdr->num_vertexarrays*sizeof(struct iqmvertexarray)/sizeof(uint32_t));
    lil32p(&buf[hdr->ofs_triangles], hdr->num_triangles*sizeof(struct iqmtriangle)/sizeof(uint32_t));
    lil32p(&buf[hdr->ofs_meshes], hdr->num_meshes*sizeof(struct iqmmesh)/sizeof(uint32_t));
    lil32p(&buf[hdr->ofs_joints], hdr->num_joints*sizeof(struct iqmjoint)/sizeof(uint32_t));

    meshdata = buf;
    nummeshes = hdr->num_meshes;
    numtris = hdr->num_triangles;
    numverts = hdr->num_vertexes;
    numjoints = hdr->num_joints;
    outframe = CALLOC(hdr->num_joints, sizeof(mat34));

    // locate the vertex attribute streams we support
    float *inposition = NULL, *innormal = NULL, *intangent = NULL, *intexcoord = NULL;
    uint8_t *inblendindex8 = NULL, *inblendweight8 = NULL;
    int *inblendindexi = NULL; float *inblendweightf = NULL;
    struct iqmvertexarray *vas = (struct iqmvertexarray *)&buf[hdr->ofs_vertexarrays];
    for(int i = 0; i < (int)hdr->num_vertexarrays; i++) {
        struct iqmvertexarray *va = &vas[i];
        switch(va->type) {
        default: continue; // return PANIC("unknown iqm vertex type (%d)", va->type), false;
        break; case IQM_POSITION: if(va->format != IQM_FLOAT || va->size != 3) return PANIC("!"), false; inposition = (float *)&buf[va->offset]; lil32pf(inposition, 3*hdr->num_vertexes);
        break; case IQM_NORMAL: if(va->format != IQM_FLOAT || va->size != 3) return PANIC("!"), false; innormal = (float *)&buf[va->offset]; lil32pf(innormal, 3*hdr->num_vertexes);
        break; case IQM_TANGENT: if(va->format != IQM_FLOAT || va->size != 4) return PANIC("!"), false; intangent = (float *)&buf[va->offset]; lil32pf(intangent, 4*hdr->num_vertexes);
        break; case IQM_TEXCOORD: if(va->format != IQM_FLOAT || va->size != 2) return PANIC("!"), false; intexcoord = (float *)&buf[va->offset]; lil32pf(intexcoord, 2*hdr->num_vertexes);
        break; case IQM_BLENDINDEXES: if(va->size != 4) return PANIC("!"), false; if(va->format != IQM_UBYTE && va->format != IQM_INT) return PANIC("!"), false;
            if(va->format == IQM_UBYTE) inblendindex8 = (uint8_t *)&buf[va->offset];
            else inblendindexi = (int *)&buf[va->offset];
        break; case IQM_BLENDWEIGHTS: if(va->size != 4) return PANIC("!"), false; if(va->format != IQM_UBYTE && va->format != IQM_FLOAT) return PANIC("!"), false;
            if(va->format == IQM_UBYTE) inblendweight8 = (uint8_t *)&buf[va->offset];
            else inblendweightf = (float *)&buf[va->offset];
        }
    }

    if (hdr->ofs_bounds) lil32p(buf + hdr->ofs_bounds, hdr->num_frames * sizeof(struct iqmbounds));
    if (hdr->ofs_bounds) bounds = (struct iqmbounds *) &buf[hdr->ofs_bounds];

    meshes = (struct iqmmesh *)&buf[hdr->ofs_meshes];
    joints = (struct iqmjoint *)&buf[hdr->ofs_joints];

    // build bind pose and inverse bind pose, propagated down the joint hierarchy
    baseframe = CALLOC(hdr->num_joints, sizeof(mat34));
    inversebaseframe = CALLOC(hdr->num_joints, sizeof(mat34));
    for(int i = 0; i < (int)hdr->num_joints; i++) {
        struct iqmjoint *j = &joints[i];
        compose34(baseframe[i], ptr3(j->translate), normq(ptrq(j->rotate)), ptr3(j->scale));
        invert34(inversebaseframe[i], baseframe[i]);
        if(j->parent >= 0) {
            multiply34x2(baseframe[i], baseframe[j->parent], baseframe[i]);
            multiply34(inversebaseframe[i], inversebaseframe[j->parent]);
        }
    }

    // upload indices
    struct iqmtriangle *tris = (struct iqmtriangle *)&buf[hdr->ofs_triangles];
    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);
    if(!ibo) glGenBuffers(1, &ibo);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, hdr->num_triangles*sizeof(struct iqmtriangle), tris, GL_STATIC_DRAW);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);

    // interleave the separate streams into iqm_vertex records
    iqm_vertex *verts = CALLOC(hdr->num_vertexes, sizeof(iqm_vertex));
    for(int i = 0; i < (int)hdr->num_vertexes; i++) {
        iqm_vertex *v = &verts[i];
        if(inposition) memcpy(v->position, &inposition[i*3], sizeof(v->position));
        if(innormal) memcpy(v->normal, &innormal[i*3], sizeof(v->normal));
        if(intangent) memcpy(v->tangent, &intangent[i*4], sizeof(v->tangent));
        if(intexcoord) memcpy(v->texcoord, &intexcoord[i*2], sizeof(v->texcoord));
        if(inblendindex8) memcpy(v->blendindexes, &inblendindex8[i*4], sizeof(v->blendindexes));
        if(inblendweight8) memcpy(v->blendweights, &inblendweight8[i*4], sizeof(v->blendweights));
        if(inblendindexi) {
            // narrow int indices into the byte layout the shader expects
            uint8_t conv[4] = { inblendindexi[i*4], inblendindexi[i*4+1], inblendindexi[i*4+2], inblendindexi[i*4+3] };
            memcpy(v->blendindexes, conv, sizeof(v->blendindexes));
        }
        if(inblendweightf) {
            // quantize float weights [0,1] to bytes [0,255]
            uint8_t conv[4] = { inblendweightf[i*4] * 255, inblendweightf[i*4+1] * 255, inblendweightf[i*4+2] * 255, inblendweightf[i*4+3] * 255 };
            memcpy(v->blendweights, conv, sizeof(v->blendweights));
        }
    }

    // upload vertices
    if(!vbo) glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, hdr->num_vertexes*sizeof(iqm_vertex), verts, GL_STATIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    FREE(verts);

    // start every mesh with the checkerboard placeholder texture
    textures = CALLOC(hdr->num_meshes, sizeof(GLuint));
    for(int i = 0; i < (int)hdr->num_meshes; i++) {
        int invalid = texture_checker().id;
        textures[i] = invalid;
    }

    const char *str = hdr->ofs_text ? (char *)&buf[hdr->ofs_text] : "";
    for(int i = 0; i < (int)hdr->num_meshes; i++) {
        struct iqmmesh *m = &meshes[i];
        PRINTF("loaded mesh: %s\n", &str[m->name]);
    }
    return true;
}
// Load the animation section of an IQM file: byte-swaps pose/anim/frame data
// in place, then bakes every (frame, pose) pair into a bone-local mat34 that
// is pre-concatenated with the inverse base pose (see the comment inside the
// loop). Returns false when the pose count does not match the joint count.
// NOTE: bare identifiers such as animdata/frames/poses/buf are macros
// (#undef'd at the end of this file) — presumably members of q.
static
bool model_load_anims(iqm_t *q, const struct iqmheader *hdr) {
// An IQM animation is only valid with exactly one pose per joint.
if((int)hdr->num_poses != numjoints) return false;
// Release any previously loaded animation set (animdata may alias the mesh
// blob, in which case it must not be freed twice).
if(animdata) {
if(animdata != meshdata) FREE(animdata);
FREE(frames);
animdata = NULL;
anims = NULL;
frames = 0;
numframes = 0;
numanims = 0;
}
// Convert pose/anim headers (32-bit words) and the packed 16-bit frame
// channel stream from little-endian.
lil32p(&buf[hdr->ofs_poses], hdr->num_poses*sizeof(struct iqmpose)/sizeof(uint32_t));
lil32p(&buf[hdr->ofs_anims], hdr->num_anims*sizeof(struct iqmanim)/sizeof(uint32_t));
lil16p((uint16_t *)&buf[hdr->ofs_frames], hdr->num_frames*hdr->num_framechannels);
animdata = buf;
numanims = hdr->num_anims;
numframes = hdr->num_frames;
anims = (struct iqmanim *)&buf[hdr->ofs_anims];
poses = (struct iqmpose *)&buf[hdr->ofs_poses];
// One baked mat34 per (frame, pose) pair.
frames = CALLOC(hdr->num_frames * hdr->num_poses, sizeof(mat34));
uint16_t *framedata = (uint16_t *)&buf[hdr->ofs_frames];
for(int i = 0; i < (int)hdr->num_frames; i++) {
for(int j = 0; j < (int)hdr->num_poses; j++) {
struct iqmpose *p = &poses[j];
quat rotate;
vec3 translate, scale;
// Each of the 10 channels (tx ty tz | qx qy qz qw | sx sy sz) stores a
// base offset; a set mask bit means one quantized delta is consumed from
// the frame stream and applied as offset + delta*scale.
translate.x = p->channeloffset[0]; if(p->mask&0x01) translate.x += *framedata++ * p->channelscale[0];
translate.y = p->channeloffset[1]; if(p->mask&0x02) translate.y += *framedata++ * p->channelscale[1];
translate.z = p->channeloffset[2]; if(p->mask&0x04) translate.z += *framedata++ * p->channelscale[2];
rotate.x = p->channeloffset[3]; if(p->mask&0x08) rotate.x += *framedata++ * p->channelscale[3];
rotate.y = p->channeloffset[4]; if(p->mask&0x10) rotate.y += *framedata++ * p->channelscale[4];
rotate.z = p->channeloffset[5]; if(p->mask&0x20) rotate.z += *framedata++ * p->channelscale[5];
rotate.w = p->channeloffset[6]; if(p->mask&0x40) rotate.w += *framedata++ * p->channelscale[6];
scale.x = p->channeloffset[7]; if(p->mask&0x80) scale.x += *framedata++ * p->channelscale[7];
scale.y = p->channeloffset[8]; if(p->mask&0x100) scale.y += *framedata++ * p->channelscale[8];
scale.z = p->channeloffset[9]; if(p->mask&0x200) scale.z += *framedata++ * p->channelscale[9];
// Concatenate each pose with the inverse base pose to avoid doing this at animation time.
// If the joint has a parent, then it needs to be pre-concatenated with its parent's base pose.
// Thus it all negates at animation time like so:
// (parentPose * parentInverseBasePose) * (parentBasePose * childPose * childInverseBasePose) =>
// parentPose * (parentInverseBasePose * parentBasePose) * childPose * childInverseBasePose =>
// parentPose * childPose * childInverseBasePose
mat34 m; compose34(m, translate, normq(rotate), scale);
if(p->parent >= 0) multiply34x3(frames[i*hdr->num_poses + j], baseframe[p->parent], m, inversebaseframe[j]);
else multiply34x2(frames[i*hdr->num_poses + j], m, inversebaseframe[j]);
}
}
// Log the loaded clips (names are offsets into the file's text blob).
const char *str = hdr->ofs_text ? (char *)&buf[hdr->ofs_text] : "";
for(int i = 0; i < (int)hdr->num_anims; i++) {
struct iqmanim *a = &anims[i];
PRINTF("loaded anim[%d]: %s\n", i, &str[a->name]);
}
return true;
}
// Resolve one GL texture per mesh from the material names stored in the IQM
// text blob. Materials exported as "left+right" (.fbx convention) get two
// attempts: (1) the left token with the file extension re-applied, (2) the
// right token spliced over the filename part of the path. Unresolvable
// materials fall back to the checker placeholder texture.
static
bool model_load_textures(iqm_t *q, const struct iqmheader *hdr) {
textures = textures ? textures : CALLOC(hdr->num_meshes, sizeof(GLuint));
const char *str = hdr->ofs_text ? (char *)&buf[hdr->ofs_text] : "";
for(int i = 0; i < (int)hdr->num_meshes; i++) {
struct iqmmesh *m = &meshes[i];
// NOTE(review): stringf() results are never freed here — presumably it
// returns a transient/managed buffer; confirm ownership semantics.
char* material_name;
int flags = TEXTURE_MIPMAPS|TEXTURE_REPEAT; // LINEAR, NEAREST
// The checker texture id doubles as the "not resolved yet" sentinel.
int invalid = texture_checker().id;
textures[i] = invalid;
// remove any material+name from materials (.fbx)
// try left token first
if( 1 ) {
material_name = stringf("%s", &str[m->material]);
char* plus = strrchr(material_name, '+');
// NOTE(review): if file_ext() returns a pointer into material_name, this
// strcpy has overlapping src/dst, which is UB — confirm it returns a copy.
if (plus) { strcpy(plus, file_ext(material_name)); }
textures[i] = texture(material_name, flags).id;
}
// else try right token
if (textures[i] == invalid) {
material_name = file_normalize( stringf("%s", &str[m->material]) );
char* plus = strrchr(material_name, '+'), *slash = strrchr(material_name, '/');
if (plus) {
// Overwrite the filename part (or the whole string) with the right token.
strcpy(slash ? slash + 1 : material_name, plus + 1);
textures[i] = texture(material_name, flags).id;
}
}
// else last resort
if (textures[i] == invalid) {
// NOTE(review): retries whatever material_name the branches above left
// behind — looks redundant with attempt (1); confirm before removing.
textures[i] = texture(material_name, flags).id; // needed?
}
if( textures[i] != invalid) {
PRINTF("loaded material[%d]: %s\n", i, &str[m->material]);
} else {
PRINTF("fail: material[%d] not found: %s\n", i, &str[m->material]);
PRINTF("warn: using placeholder material[%d]=texture_checker\n", i);
textures[i] = texture_checker().id; // placeholder
}
}
return true;
}
// Parse an IQM model from an in-memory blob. Sections are loaded according
// to 'flags' (MODEL_NO_MESHES / MODEL_NO_TEXTURES / MODEL_NO_ANIMATIONS skip
// their section). Returns a zero-initialized model_t on any failure.
model_t model_from_mem(const void *mem, int len, int flags) {
const char *ptr = (const char *)mem;
// The skinning shader is compiled once and shared by every model.
static int shaderprog = -1;
if( shaderprog < 0 ) {
const char *symbols[] = { "{{include-shadowmap}}", fs_0_0_shadowmap_lit }; // #define RIM
shaderprog = shader(strlerp(1,symbols,vs_32344443_332_model), strlerp(1,symbols,fs_32_4_model), //fs,
"att_position,att_texcoord,att_normal,att_tangent,att_indexes,att_weights,att_color,att_bitangent","fragColor");
}
iqm_t *q = CALLOC(1, sizeof(iqm_t));
program = shaderprog;
int error = 1;
if( ptr && len ) {
// The header is copied out so it can be byte-swapped independently.
struct iqmheader hdr; memcpy(&hdr, ptr, sizeof(hdr)); ptr += sizeof(hdr);
if( !memcmp(hdr.magic, IQM_MAGIC, sizeof(hdr.magic))) {
// All header fields after the magic are little-endian uint32 on disk.
lil32p(&hdr.version, (sizeof(hdr) - sizeof(hdr.magic))/sizeof(uint32_t));
if(hdr.version == IQM_VERSION) {
// buf mirrors the whole file so the ofs_* offsets index it directly;
// its first sizeof(hdr) bytes stay zeroed (CALLOC) since every offset
// points past the header.
buf = CALLOC(hdr.filesize, sizeof(uint8_t));
memcpy(buf + sizeof(hdr), ptr, hdr.filesize - sizeof(hdr));
error = 0;
if( hdr.num_meshes > 0 && !(flags & MODEL_NO_MESHES) ) error |= !model_load_meshes(q, &hdr);
if( hdr.num_meshes > 0 && !(flags & MODEL_NO_TEXTURES) ) error |= !model_load_textures(q, &hdr);
if( hdr.num_anims > 0 && !(flags & MODEL_NO_ANIMATIONS) ) error |= !model_load_anims(q, &hdr);
// The loaders may keep buf aliased as meshdata/animdata; free it here
// only when nobody kept it.
if( buf != meshdata && buf != animdata ) FREE(buf);
}
}
}
model_t m = {0};
if( error ) {
PRINTF("Error: cannot load %s", "model");
FREE(q), q = 0;
} else {
// m.boxes = bounds; // <@todo
m.num_meshes = nummeshes;
m.num_triangles = numtris;
m.num_joints = numjoints;
//m.num_poses = numposes;
m.num_anims = numanims;
m.num_frames = numframes;
m.iqm = q;
// Prime the pose matrices so an un-animated model still renders posed.
m.curframe = model_animate(m, 0);
id44(m.pivot);
model_set_state(m);
}
return m;
}
// Convenience loader: fetch 'filename' through the virtual filesystem and
// hand the raw bytes to model_from_mem().
model_t model(const char *filename, int flags) {
    int size; // vfs_pushd(filedir(filename))
    char *bytes = vfs_load(filename, &size); // + vfs_popd
    return model_from_mem(bytes, size, flags);
}
// Fetch a joint's current position ('pos') and its parent's position
// ('from') from the translation part of the outframe matrices — used to draw
// skeleton bone segments. Root joints report from = (0,0,0).
// NOTE(review): 'curframe' is currently unused (see commented-out lookup).
void model_get_bone_pose(model_t m, float curframe, int joint, vec3 *pos, vec3 *from) { // bugs?
if(!m.iqm) return;
iqm_t *q = m.iqm;
// mat34 *mat = &frames[(int)curframe * numjoints];
float *a = outframe[joint];
#if 0
// Disabled reference showing how the vertex shader blends several matrices
// per vertex; kept for the @fixme below.
mat34 m34 = {0};
muladd34(m34, outframe[int(att_indexes.x)], att_weights.x);
muladd34(m34, outframe[int(att_indexes.y)], att_weights.y);
muladd34(m34, outframe[int(att_indexes.z)], att_weights.z);
muladd34(m34, outframe[int(att_indexes.w)], att_weights.w);
objPos = vec4(att_position, 1.0) * m34;
#endif
// Elements 12..14 of a mat34 hold the translation column.
*pos = vec3(a[12], a[13], a[14]);
if (joints[joint].parent >= 0) {
float *b = outframe[joints[joint].parent];
/*
@fixme: do as above
*/
*from = vec3(b[12], b[13], b[14]);
} else {
*from = vec3(0, 0, 0);
}
}
// Evaluate the skeleton pose at 'curframe', interpolating between the two
// nearest keyframes within [minframe, maxframe]. With loop=true the frame
// indices wrap around the clip; otherwise they clamp at its edges. Writes
// the per-joint matrices into outframe[] and returns the frame actually
// used, or -1 when the model has no IQM payload (or no frames: retframe
// stays -1).
float model_animate_clip(model_t m, float curframe, int minframe, int maxframe, bool loop) {
if(!m.iqm) return -1;
iqm_t *q = m.iqm;
float retframe = -1;
if( numframes > 0 ) {
int frame1 = (int)/*floor*/(curframe);
// Blend toward the next frame when playing forward, the previous one when
// the requested frame is behind the model's current frame (rewinding).
int frame2 = frame1 + (curframe >= m.curframe ? 1 : -1);
float frameoffset = curframe - frame1;
if( loop ) {
int distance = (maxframe - minframe);
frame1 = frame1 >= maxframe ? minframe : frame1 < minframe ? maxframe - clampf(minframe - frame1, 0, distance) : frame1;
frame2 = frame2 >= maxframe ? minframe : frame2 < minframe ? maxframe - clampf(minframe - frame2, 0, distance) : frame2;
retframe = frame1 + frameoffset;
} else {
frame1 = clampf(frame1, minframe, maxframe);
frame2 = clampf(frame2, minframe, maxframe);
retframe = minf(frame1 + frameoffset, maxframe); // clamp to maxframe
}
mat34 *mat1 = &frames[frame1 * numjoints];
mat34 *mat2 = &frames[frame2 * numjoints];
// @todo: add animation blending and inter-frame blending here
// Interpolate matrixes between the two closest frames and concatenate with
// parent matrix if necessary. Concatenate the result with the inverse of the base pose.
for(int i = 0; i < numjoints; i++) {
mat34 mat; lerp34(mat, mat1[i], mat2[i], frameoffset);
// Presumably parents precede children in the joint array, so
// outframe[parent] is already final here — TODO confirm against loader.
if(joints[i].parent >= 0) multiply34x2(outframe[i], outframe[joints[i].parent], mat);
else copy34(outframe[i], mat);
}
// model_render_skeleton
if(0)
for( int i = 0; i < numjoints; i++ ) {
vec3 pos, from;
model_get_bone_pose(m, curframe, i, &pos, &from);
ddraw_line(pos, from);
}
}
return retframe;
}
// Evaluate the animation at 'curframe' over the full frame range, looping.
// Returns the frame actually used, or -1 when the model has no IQM payload.
float model_animate(model_t m, float curframe) {
if(!m.iqm) return -1;
iqm_t *q = m.iqm; // 'q' is required by the accessor macros below (numframes)
return model_animate_clip(m, curframe, 0, numframes-1, true);
}
// Issue one glDrawElements call per mesh using the VAO/IBO built at load
// time. Assumes uniforms/shader state were applied beforehand (see
// model_render2). Also bumps the drawcall/triangle profiler counters.
static
void model_draw_call(model_t m) {
if(!m.iqm) return;
iqm_t *q = m.iqm; // required by the accessor macros (vao, nummeshes, ...)
glBindVertexArray( vao );
// NULL base pointer: with an ELEMENT_ARRAY_BUFFER bound, the "indices"
// argument of glDrawElements is a byte offset into the IBO, so
// &tris[first_triangle] yields first_triangle*sizeof(iqmtriangle).
struct iqmtriangle *tris = NULL;
for(int i = 0; i < nummeshes; i++) {
// Renamed from 'm' — the original shadowed the model_t parameter.
struct iqmmesh *mesh = &meshes[i];
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures[i] );
glUniform1i(glGetUniformLocation(program, "fsDiffTex"), 0 /*<-- unit!*/ );
glDrawElements(GL_TRIANGLES, 3*mesh->num_triangles, GL_UNSIGNED_INT, &tris[mesh->first_triangle]);
profile_incstat("drawcalls", +1);
profile_incstat("triangles", +mesh->num_triangles);
}
glBindVertexArray( 0 );
}
// Render the model with an explicit shader program; a shader of 0 selects
// the model's own program. Uploads the matrix/skinning uniforms, then draws.
void model_render2(model_t m, mat44 proj, mat44 view, mat44 model, int shader) {
if(!m.iqm) return;
iqm_t *q = m.iqm; // required by the 'program' accessor macro
model_set_uniforms(m, shader ? shader : program, proj, view, model);
model_draw_call(m);
}
// Render the model with its default shader (forwards shader id 0, which
// model_render2 replaces with the model's own program).
void model_render(model_t m, mat44 proj, mat44 view, mat44 model) {
model_render2(m, proj, view, model, 0);
}
// Transform an axis-aligned bounding box by an affine matrix and return the
// AABB that encloses the result.
// Based on "Transforming Axis-Aligned Bounding Boxes" by Jim Arvo, 1990.
static
aabb aabb_transform( aabb A, mat44 M) {
    const float *amin = &A.min.x; // view the vec3 members as float[3]
    const float *amax = &A.max.x;
    aabb B;
    float *bmin = &B.min.x;
    float *bmax = &B.max.x;
    // Seed both corners with the matrix translation (elements 12..14).
    for( int axis = 0; axis < 3; ++axis ) {
        bmin[axis] = M[12 + axis];
        bmax[axis] = M[12 + axis];
    }
    // Accumulate each 3x3 rotation/scale term into whichever corner it
    // extends, picking the smaller product for min and the larger for max.
    for( int row = 0; row < 3; ++row )
        for( int col = 0; col < 3; ++col ) {
            float lo = M[row*4 + col] * amin[col];
            float hi = M[row*4 + col] * amax[col];
            if( lo < hi ) {
                bmin[row] += lo;
                bmax[row] += hi;
            } else {
                bmin[row] += hi;
                bmax[row] += lo;
            }
        }
    return B;
}
// Return the model's per-frame bounding box transformed by 'transform'.
// Falls back to a degenerate zero box when no bounds data was loaded.
aabb model_aabb(model_t m, mat44 transform) {
iqm_t *q = m.iqm;
if( q && bounds ) {
// (numframes + !numframes) turns 0 into 1, avoiding a modulo-by-zero.
int f = ( (int)m.curframe ) % (numframes + !numframes);
vec3 bbmin = ptr3(bounds[f].bbmin);
vec3 bbmax = ptr3(bounds[f].bbmax);
return aabb_transform(aabb(bbmin,bbmax), transform);
}
return aabb(vec3(0,0,0),vec3(0,0,0));
}
// Free every heap allocation owned by the model's IQM payload.
// NOTE(review): GL objects (textures[], vao/vbo/ibo) are not deleted here —
// confirm GL cleanup happens elsewhere.
// NOTE(review): model_from_mem may already FREE(buf) when it aliases neither
// meshdata nor animdata — confirm FREE() nulls its argument, otherwise the
// FREE(buf) below can double-free.
void model_destroy(model_t m) {
iqm_t *q = m.iqm;
// if(m.mesh) mesh_destroy(m.mesh);
FREE(outframe);
FREE(textures);
FREE(baseframe);
FREE(inversebaseframe);
// animdata may alias the mesh blob (see model_load_anims); free only once.
if(animdata != meshdata) FREE(animdata);
//FREE(meshdata);
FREE(frames);
FREE(buf);
FREE(q);
}
#undef program
#undef meshdata
#undef animdata
#undef nummeshes
#undef numtris
#undef numverts
#undef numjoints
#undef numframes
#undef numanims
#undef meshes
#undef textures
#undef joints
#undef poses
#undef anims
#undef baseframe
#undef inversebaseframe
#undef outframe
#undef frames
#undef vao
#undef ibo
#undef vbo
#undef bonematsoffset
#undef buf
#undef bounds
#endif // RENDER_C
|
fdp.c | #include <math.h>
#include <string.h>
#include <stdio.h>
#include "types.h"
#include "util.h"
#include "vector.h"
#include "atom.h"
#include "random.h"
#include "fdp.h"
#include "main.h"
////////////////////////////////////////////////////////////
// Force-directed placement code.
////////////////////////////////////////////////////////////
#define EPSILON (1.0e-7)
#define MAX_STEPS 1000000
#define SCALE_FORCE (2.0)
#define GRAVITY_FORCE (-0.05)
#define SPRING_FORCE (0.1)
/*
 * Run force-directed placement on the ligand atoms.
 *
 * Active only while state->fdp == 1. Every atom pair repels via an
 * "anti-gravity" term (GRAVITY_FORCE / d^2), bonded atoms attract via a
 * Hooke spring (SPRING_FORCE * d), and positions are integrated with a
 * simple Euler step until the summed force magnitude drops below EPSILON
 * (then state->fdp is set to 2) or MAX_STEPS is exhausted (then a warning
 * is printed and the negative step count is returned).
 *
 * Returns the number of steps taken (0 when placement is not active), or
 * -MAX_STEPS when it failed to converge.
 */
int fdp_ligand(state_t *state)
{
  vector3_t dir;
  double mag, total;
  int a, b, step = 0;
  if( state->fdp == 1 ) {
    /* Random jitter breaks up pathological layouts (e.g. coincident atoms,
       which would produce a zero-length force direction below). */
    for(b=0; b<state->nligand; b++) {
      state->ligand[b].pos.s.x += random_U(&(state->random),EPSILON);
      state->ligand[b].pos.s.y += random_U(&(state->random),EPSILON);
      state->ligand[b].pos.s.z += random_U(&(state->random),EPSILON);
      state->ligand[b].pos.s.x -= random_U(&(state->random),EPSILON);
      state->ligand[b].pos.s.y -= random_U(&(state->random),EPSILON);
      state->ligand[b].pos.s.z -= random_U(&(state->random),EPSILON);
    }
    for(step=0; step<MAX_STEPS; step++) {
      /* Reset the accumulated force on every atom. */
      for(a=0; a<state->nligand; a++) {
        memset(&(state->ligand[a].force),0,sizeof(vector3_t));
      }
      /* Pairwise repulsion: GRAVITY_FORCE is negative, so scaling the
         normalized a->b direction by GRAVITY_FORCE/d^2 pushes atom a away. */
      //#pragma omp parallel for private(a,b,dir,mag)
      for(a=0; a<state->nligand; a++) {
        for(b=0; b<state->nligand; b++) {
          if( a != b ) {
            vector3_sub_vector(&(state->ligand[b].pos),
                               &(state->ligand[a].pos),
                               &dir);
            mag = vector3_length(&dir);
            mag = GRAVITY_FORCE / (mag*mag);
            vector3_normalize(&dir,&dir);
            vector3_mult_scalar(&dir,&dir,mag);
            vector3_add_vector(&dir,
                               &(state->ligand[a].force),
                               &(state->ligand[a].force));
          }
        }
      }
      /* Spring attraction along each bond: equal and opposite forces are
         applied to the two endpoint atoms. */
      for(a=0; a<state->nbonds; a++) {
        vector3_sub_vector(&(state->ligand[state->bonds[a][1]].pos),
                           &(state->ligand[state->bonds[a][0]].pos),
                           &dir);
        mag = vector3_length(&dir);
        mag = SPRING_FORCE * mag;
        vector3_normalize(&dir,&dir);
        vector3_mult_scalar(&dir,&dir,mag);
        vector3_add_vector(&dir,
                           &(state->ligand[state->bonds[a][0]].force),
                           &(state->ligand[state->bonds[a][0]].force));
        vector3_mult_scalar(&dir,&dir,-1.0);
        vector3_add_vector(&dir,
                           &(state->ligand[state->bonds[a][1]].force),
                           &(state->ligand[state->bonds[a][1]].force));
      }
      /* Converged once the summed force magnitudes become negligible. */
      total = 0.0;
      for(a=0; a<state->nligand; a++) {
        mag = vector3_length(&(state->ligand[a].force));
        total += mag;
      }
      if( total < EPSILON ) {
        state->fdp = 2; /* placement finished */
        break;
      }
      /* Euler step: pos += SCALE_FORCE * force. The force vector is scaled
         in place, which is harmless — it is cleared next iteration. */
      for(a=0; a<state->nligand; a++) {
        vector3_mult_scalar(&(state->ligand[a].force),&(state->ligand[a].force),SCALE_FORCE);
        vector3_add_vector(&(state->ligand[a].force),
                           &(state->ligand[a].pos),
                           &(state->ligand[a].pos));
      }
    }
    if( step >= MAX_STEPS ) {
      Warn("fdp_ligand(): Did not converge for %d timesteps.\n",step);
      return -step;
    }
  }
  /* Convergence time in steps (0 if placement was not active). */
  return step;
}
|
GB_unaryop__ainv_uint8_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_uint64
// op(A') function: GB_tran__ainv_uint8_uint64
// C type: uint8_t
// A type: uint64_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -((uint8_t) Ax [p]) for all p in [0,anz): cast each uint64 entry
// to uint8, then apply AINV (arithmetic negation) — see the GB_CASTING /
// GB_OP / GB_CAST_OP macros above. Auto-generated file; do not edit by hand.
GrB_Info GB_unop__ainv_uint8_uint64
(
uint8_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// every entry is independent, so the loop is split statically over threads
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose + typecast + AINV. The actual loops live in
// the included GB_unaryop_transpose.c template, driven by the GB_CAST_OP
// macro defined above; this wrapper selects phase 2 of the transpose.
// Auto-generated file; do not edit by hand.
GrB_Info GB_tran__ainv_uint8_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__minv_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__minv_int8_int8
// op(A') function: GB_unop_tran__minv_int8_int8
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 8) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_SIGNED (Ax [p], 8): apply MINV (integer multiplicative
// inverse; GB_IMINV_SIGNED is defined elsewhere) to each int8 entry, with no
// typecast. Auto-generated file; do not edit by hand.
GrB_Info GB_unop_apply__minv_int8_int8
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/sparse case: every one of the anz entries is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op would reduce to a parallel memcpy (macro is 0 for MINV)
GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 8) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
// only positions whose bitmap bit is set hold a valid entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose + MINV. The loops live in the included
// GB_unop_transpose.c template, driven by the macros defined above.
// Auto-generated file; do not edit by hand.
GrB_Info GB_unop_tran__minv_int8_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__creal_fp64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__creal_fp64_fc64)
// op(A') function: GB (_unop_tran__creal_fp64_fc64)
// C type: double
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = creal (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = creal (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = creal (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CREAL || GxB_NO_FP64 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = creal (Ax [p]): extract the real part of each double-complex
// (GxB_FC64_t) entry into a double. Auto-generated file; do not edit by hand.
GrB_Info GB (_unop_apply__creal_fp64_fc64)
(
double *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = (aij) ;
Cx [p] = creal (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
// only positions whose bitmap bit is set hold a valid entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = (aij) ;
Cx [p] = creal (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose + creal (complex -> real part). The loops
// live in the included GB_unop_transpose.c template, driven by the macros
// defined above. Auto-generated file; do not edit by hand.
GrB_Info GB (_unop_tran__creal_fp64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mkl_quantized_conv_ops.h | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_MKL_MKL_QUANTIZED_CONV_OPS_H_
#define TENSORFLOW_CORE_KERNELS_MKL_MKL_QUANTIZED_CONV_OPS_H_
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#ifdef INTEL_MKL
namespace tensorflow {
template <class T>
float MklFloatForOneQuantizedLevel(float range_min, float range_max) {
int64 highest = static_cast<int64>(Eigen::NumTraits<T>::highest());
int64 lowest = static_cast<int64>(Eigen::NumTraits<T>::lowest());
// Adjusting for having a symmetric range.
// for example: for 8-bit [-127, 127] as opposed to [-128, 127].
if (lowest < -highest) ++lowest;
const float float_for_one_quantized_level =
(range_max - range_min) / (highest - lowest);
return float_for_one_quantized_level;
}
// Computes the quantization range [*min_c, *max_c] for the product of two
// quantized operands (types T1, T2) stored in result type T3, given each
// operand's float range.
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
                                           float min_b, float max_b,
                                           float* min_c, float* max_c) {
  const float level_a = MklFloatForOneQuantizedLevel<T1>(min_a, max_a);
  const float level_b = MklFloatForOneQuantizedLevel<T2>(min_b, max_b);
  // One quantized level of the product spans level_a * level_b float units.
  const float level_c = level_a * level_b;
  const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest());
  const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest());
  *min_c = level_c * c_lowest;
  *max_c = level_c * c_highest;
}
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
const Tensor& min_b_vector,
const Tensor& max_b_vector,
Tensor** min_c_vector,
Tensor** max_c_vector) {
DCHECK(min_b_vector.NumElements() == (*min_c_vector)->NumElements());
DCHECK(max_b_vector.NumElements() == (*max_c_vector)->NumElements());
size_t n_channel = min_b_vector.NumElements();
const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest());
const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest());
const float* min_b = min_b_vector.flat<float>().data();
const float* max_b = max_b_vector.flat<float>().data();
float* min_c = (*min_c_vector)->flat<float>().data();
float* max_c = (*max_c_vector)->flat<float>().data();
#ifdef ENABLE_ONEDNN_OPENMP
#pragma omp parallel for
#endif // ENABLE_ONEDNN_OPENMP
// TODO: Add eigen parallel_for
for (int64_t n = 0; n < n_channel; ++n) {
float a_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T1>(min_a, max_a);
float b_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T2>(min_b[n], max_b[n]);
float c_float_for_one_quant_level =
a_float_for_one_quant_level * b_float_for_one_quant_level;
min_c[n] = c_float_for_one_quant_level * c_lowest;
max_c[n] = c_float_for_one_quant_level * c_highest;
}
}
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_KERNELS_MKL_MKL_QUANTIZED_CONV_OPS_H_
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.
% Assignment defines the output image's color map and sets each pixel's
% color by reclassification in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of
% pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
  Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2 /* bits dropped per channel when indexing the color cache */
#else
#define CacheShift 3 /* coarser cache on Apple targets */
#endif
#define ErrorQueueLength 16 /* entries in the CubeInfo error-diffusion queue */
#define MaxNodes 266817 /* prune a tree level once the node count exceeds this */
#define MaxTreeDepth 8 /* one tree level per bit of an 8-bit channel */
#define NodesInAList 1920 /* nodes per allocation bank (see struct _Nodes) */
/*
  Typedef declarations.
*/
/*
  A pixel whose channels are kept in double precision; used for error
  accumulation and running color sums where Quantum precision would lose
  information.
*/
typedef struct _DoublePixelPacket
{
  double
    red,
    green,
    blue,
    alpha;
} DoublePixelPacket;
/*
  One node of the color description tree: an octree, widened to 16 children
  per node when alpha is associated (see ColorToNodeId()).
*/
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,        /* enclosing (larger) color cube */
    *child[16];     /* 8 children used normally, 16 with associated alpha */

  MagickSizeType
    number_unique;  /* n2: pixels classified no deeper than this node */

  DoublePixelPacket
    total_color;    /* QuantumScale-normalized channel sums for averaging */

  double
    quantize_error; /* accumulated approximate quantization error */

  size_t
    color_number,   /* colormap index assigned by DefineImageColormap() */
    id,             /* child slot of this node within its parent */
    level;          /* tree depth at which this node was created */
} NodeInfo;
/*
  A bank of NodeInfo structures plus a link to the next bank; the chain lets
  DestroyCubeInfo() release all node storage in one sweep.  Banks are
  presumably sized by NodesInAList — allocation happens in GetNodeInfo(),
  outside this view.
*/
typedef struct _Nodes
{
  NodeInfo
    *nodes;

  struct _Nodes
    *next;
} Nodes;
/*
  Working state for color quantization: the color description tree, its
  bookkeeping, and per-pass scratch fields.
*/
typedef struct _CubeInfo
{
  NodeInfo
    *root;              /* root of the color description tree */

  size_t
    colors,             /* number of colors currently represented by leaves */
    maximum_colors;     /* requested color budget; exceeding it triggers pruning */

  ssize_t
    transparent_index;  /* colormap slot of the most-populous translucent color */

  MagickSizeType
    transparent_pixels; /* pixel count backing transparent_index */

  DoublePixelPacket
    target;             /* scratch: color sought by ClosestColor() */

  double
    distance,           /* scratch: best squared distance found so far */
    pruning_threshold,  /* used by the pruning passes (defined elsewhere) */
    next_threshold;     /* next pruning threshold candidate */

  size_t
    nodes,              /* total nodes allocated; compared against MaxNodes */
    free_nodes,         /* unallocated slots in the current bank — see GetNodeInfo() */
    color_number;       /* scratch: colormap index found by ClosestColor() */

  NodeInfo
    *next_node;         /* next free node in the current bank (allocator state) */

  Nodes
    *node_queue;        /* chain of node banks, freed by DestroyCubeInfo() */

  MemoryInfo
    *memory_info;       /* backing store for the dither cache below */

  ssize_t
    *cache;             /* color cache indexed by CacheOffset() */

  DoublePixelPacket
    error[ErrorQueueLength];   /* recent quantization errors for dithering */

  double
    weights[ErrorQueueLength]; /* weight applied to each queued error */

  QuantizeInfo
    *quantize_info;     /* quantization settings; owned and destroyed here */

  MagickBooleanType
    associate_alpha;    /* MagickTrue when alpha participates in classification */

  ssize_t
    x,
    y;                  /* current pixel position during dithering */

  size_t
    depth;              /* current maximum tree depth (decremented by pruning) */

  MagickOffsetType
    offset;             /* progress-reporting offset */

  MagickSizeType
    span;               /* progress-reporting span */
} CubeInfo;
/*
  Method prototypes: forward declarations for the static helpers defined
  later in this file.
*/
static CubeInfo
  *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);

static NodeInfo
  *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);

static MagickBooleanType
  AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
  ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
  DitherImage(Image *,CubeInfo *,ExceptionInfo *),
  SetGrayscaleImage(Image *,ExceptionInfo *);

static size_t
  DefineImageColormap(Image *,CubeInfo *,NodeInfo *);

static void
  ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
  DestroyCubeInfo(CubeInfo *),
  PruneLevel(CubeInfo *,const NodeInfo *),
  PruneToCubeDepth(CubeInfo *,const NodeInfo *),
  ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *info;

  /*
    Allocate a QuantizeInfo structure and give it default settings.
  */
  info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(info);
  if (image_info == (ImageInfo *) NULL)
    return(info);
  /*
    Seed the dither method and error-measurement flag from the image info;
    an explicit "dither" image option overrides the boolean dither flag.
  */
  info->dither_method=image_info->dither != MagickFalse ?
    RiemersmaDitherMethod : NoDitherMethod;
  {
    const char
      *option;

    option=GetImageOption(image_info,"dither");
    if (option != (const char *) NULL)
      info->dither_method=(DitherMethod) ParseCommandOption(
        MagickDitherOptions,MagickFalse,option);
  }
  info->measure_error=image_info->verbose;
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  /*
    Load a pixel into alpha_pixel: when the cube associates alpha and the
    pixel is not fully opaque, premultiply the color channels by the scaled
    alpha value; otherwise copy the channels through unchanged.
  */
  if ((cube_info->associate_alpha != MagickFalse) &&
      (GetPixelAlpha(image,pixel) != OpaqueAlpha))
    {
      double
        scale;

      scale=(double) (QuantumScale*GetPixelAlpha(image,pixel));
      alpha_pixel->red=scale*GetPixelRed(image,pixel);
      alpha_pixel->green=scale*GetPixelGreen(image,pixel);
      alpha_pixel->blue=scale*GetPixelBlue(image,pixel);
      alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
      return;
    }
  alpha_pixel->red=(double) GetPixelRed(image,pixel);
  alpha_pixel->green=(double) GetPixelGreen(image,pixel);
  alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  /*
    PixelInfo flavor of AssociateAlphaPixel(): premultiply the color
    channels by scaled alpha only when alpha is associated and the pixel is
    not fully opaque.
  */
  if ((cube_info->associate_alpha != MagickFalse) &&
      (pixel->alpha != OpaqueAlpha))
    {
      double
        scale;

      scale=(double) (QuantumScale*pixel->alpha);
      alpha_pixel->red=scale*pixel->red;
      alpha_pixel->green=scale*pixel->green;
      alpha_pixel->blue=scale*pixel->blue;
      alpha_pixel->alpha=(double) pixel->alpha;
      return;
    }
  alpha_pixel->red=(double) pixel->red;
  alpha_pixel->green=(double) pixel->green;
  alpha_pixel->blue=(double) pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    blue_bit,
    green_bit,
    node_id,
    red_bit;

  /*
    Form a child-node index from bit `index` of each 8-bit channel: red in
    bit 0, green in bit 1, blue in bit 2, and — when alpha is associated —
    alpha in bit 3.
  */
  red_bit=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) &
    0x01);
  green_bit=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) &
    0x01);
  blue_bit=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) &
    0x01);
  node_id=red_bit | (green_bit << 1) | (blue_bit << 2);
  if (cube_info->associate_alpha != MagickFalse)
    node_id|=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >>
      index) & 0x1) << 3;
  return(node_id);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace(image,sRGBColorspace,exception);
  if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);;
  /* DefineImageColormap() rebuilds image->colors as it fills the map. */
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /*
          Each thread works on a private copy of the cube so the scratch
          fields used by ClosestColor() (target, distance, color_number)
          do not race between threads.
        */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
          */
          /* Measure the run of pixels identical to *q; the color search
             below is then performed once for the whole run. */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          /* Seed distance beyond the maximum possible squared distance so
             the first candidate always wins. */
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_AssignImageColors)
#endif
            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  if ((cube_info->quantize_info->number_colors == 2) &&
      (cube_info->quantize_info->colorspace == GRAYColorspace))
    {
      double
        intensity;

      /*
        Monochrome image: force the two colormap entries to pure black and
        white, ordered by their luma.
      */
      intensity=0.0;
      if ((image->colors > 1) &&
          (GetPixelInfoLuma(image->colormap+0) >
           GetPixelInfoLuma(image->colormap+1)))
        intensity=(double) QuantumRange;
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the storage_class phase for realistic values of
% Cmax. If colors components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing that pixel's color.  It updates the following data for each such
% node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  /*
    Decide whether the color cube should carry an alpha channel: only when
    the image blends alpha, and never for 2-color grayscale quantization.
  */
  if ((cube_info->quantize_info->number_colors == 2) &&
      (cube_info->quantize_info->colorspace == GRAYColorspace))
    {
      cube_info->associate_alpha=MagickFalse;
      return;
    }
  cube_info->associate_alpha=image->alpha_trait == BlendPixelTrait ?
    MagickTrue : MagickFalse;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  double
    bisect;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace,exception);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  /*
    First pass: classify rows at the full MaxTreeDepth until the leaf count
    exceeds maximum_colors, then prune to the current cube depth and fall
    through to the second pass.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      /* Runs of identical pixels are classified once with weight `count`. */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        /* Track the center of the shrinking sub-cube the pixel falls in. */
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance))
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Second pass: classify the remaining rows at the (possibly pruned)
    cube_info->depth instead of the full MaxTreeDepth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /*
    Allocate a fresh QuantizeInfo with default settings, then copy the
    caller-visible fields from the source when one was supplied.
  */
  clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info));
  if (clone_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->measure_error=quantize_info->measure_error;
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Depth-first: examine every populated child before this node itself.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique == 0)
    return;
  {
    double
      alpha,
      beta,
      delta,
      distance;

    register DoublePixelPacket
      *magick_restrict target;

    register PixelInfo
      *magick_restrict candidate;

    /*
      Determine if this node's colormap entry is "closest" to the target:
      accumulate the squared distance channel by channel, bailing out as
      soon as it exceeds the best distance seen so far.  When alpha is
      associated, the color channels are weighted by their alphas.
    */
    candidate=image->colormap+node_info->color_number;
    target=(&cube_info->target);
    alpha=1.0;
    beta=1.0;
    if (cube_info->associate_alpha != MagickFalse)
      {
        alpha=(double) (QuantumScale*candidate->alpha);
        beta=(double) (QuantumScale*target->alpha);
      }
    delta=alpha*candidate->red-beta*target->red;
    distance=delta*delta;
    if (distance > cube_info->distance)
      return;
    delta=alpha*candidate->green-beta*target->green;
    distance+=delta*delta;
    if (distance > cube_info->distance)
      return;
    delta=alpha*candidate->blue-beta*target->blue;
    distance+=delta*delta;
    if (distance > cube_info->distance)
      return;
    if (cube_info->associate_alpha != MagickFalse)
      {
        delta=candidate->alpha-target->alpha;
        distance+=delta*delta;
      }
    if (distance <= cube_info->distance)
      {
        cube_info->distance=distance;
        cube_info->color_number=node_info->color_number;
      }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    info;

  /*
    Re-quantize a PseudoClass image to its own color count; this drops any
    duplicate or unused colormap entries.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class != PseudoClass)
    return(MagickFalse);
  GetQuantizeInfo(&info);
  info.tree_depth=MaxTreeDepth;
  info.number_colors=image->colors;
  return(QuantizeImage(&info,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% of unique colors is not zero. DefineImageColormap() returns the number of
% colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register double
        alpha;

      register PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube:
        divide the accumulated channel sums by the pixel count.
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /*
                Un-premultiply: the stored color sums were weighted by
                alpha (see AssociateAlphaPixel()), so divide the mean
                channels by the mean alpha before clamping.
              */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /*
                Remember the most-populous translucent entry as the
                transparent index.
              */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with an image.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;

  /*
    Release color cube tree storage: walk the chain of node banks, freeing
    each bank's node array and then its list link.  A while loop (rather
    than the previous do/while) avoids dereferencing node_queue when the
    queue was never populated.
  */
  while (cube_info->node_queue != (Nodes *) NULL)
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  }
  /* Free the dither color cache, the owned quantize settings, and finally
     the cube itself. */
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Validate the argument before doing any other work; logging after the
    asserts matches the convention of the other MagickCore destructors
    (previously the trace event fired before validation).
  */
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* Invalidate the signature so a double-destroy trips the assert above. */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    n;

  /*
    Release each worker thread's error-diffusion row buffer, then the
    table of buffers itself.  Returns NULL for pointer reassignment.
  */
  assert(pixels != (DoublePixelPacket **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (pixels[n] == (DoublePixelPacket *) NULL)
      continue;
    pixels[n]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[n]);
  }
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
DoublePixelPacket
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (DoublePixelPacket **) NULL)
return((DoublePixelPacket **) NULL);
(void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
sizeof(**pixels));
if (pixels[i] == (DoublePixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
/*
  CacheOffset() maps a pixel's color to its index in the dither color cache:
  each channel is scaled to 8 bits (ScaleQuantumToChar) and truncated to its
  top (8-CacheShift) bits, then the truncated channels are packed into one
  integer with red in the low bits, followed by green, blue, and — only when
  alpha is associated — alpha.
*/
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  /* Pack the truncated RGB channels into a single cache index. */
  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  /* Alpha widens the index only when the cube tracks it. */
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using serpentine-scan Floyd-Steinberg error
    diffusion: rows alternate direction, and each pixel's residual error is
    pushed onto its yet-unvisited neighbors with the classic 7/16, 5/16, 3/16
    and 1/16 weights.  Returns MagickFalse if pixel access fails or the
    caller's progress monitor requests cancellation.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Work on a local copy of the cube.  `current' and `previous' alternate
      between the two halves of the thread's row buffer; `v' is the scan
      direction: +1 on even rows, -1 on odd (serpentine) rows.
    */
    cube=(*cube_info);
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      /*
        `u' is the column actually visited this step (reversed on odd rows).
        Accumulate the diffused error from the west neighbor (7/16) and the
        three previous-row neighbors (1/16, 5/16, 3/16).
      */
      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      if (x > 0)
        {
          pixel.red+=7*current[u-v].red/16;
          pixel.green+=7*current[u-v].green/16;
          pixel.blue+=7*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=previous[u+v].alpha/16;
            }
          pixel.red+=5*previous[u].red/16;
          pixel.green+=5*previous[u].green/16;
          pixel.blue+=5*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5*previous[u].alpha/16;
          if (x > 0)
            {
              pixel.red+=3*previous[u-v].red/16;
              pixel.green+=3*previous[u-v].green/16;
              pixel.blue+=3*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /*
    Fix: propagate accumulated failures.  Previously the tracked `status'
    was discarded and MagickTrue was returned unconditionally, hiding
    pixel-access failures and progress-monitor cancellation from callers.
  */
  return(status);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int,
ExceptionInfo *);
/*
  Recursively trace a Hilbert curve over the image, dithering one pixel per
  step via RiemersmaDither(), which also advances the (x,y) cursor kept in
  cube_info.  `direction' names the orientation of the current "U" motif
  using the compass gravity constants.  At level 1 only the motif's three
  connecting steps are emitted; at higher levels each case emits four
  rotated sub-curves of level-1 joined by three single steps.  Errors from
  RiemersmaDither() are intentionally ignored here; the final dither call
  in DitherImage() reports overall status.
*/
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction,ExceptionInfo *exception)
{
  if (level == 1)
    /* Base case: emit the three steps of the smallest "U" motif. */
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        break;
      }
      default:
        break;
    }
  else
    /*
      Recursive case: four level-1 sub-curves (two reflected, two in the
      parent orientation) joined by three connecting steps.
    */
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        break;
      }
      default:
        break;
    }
}
/*
  Dither the single pixel at the Hilbert-curve cursor (p->x,p->y), then move
  the cursor one step in `direction' (ForgetGravity matches no case and
  leaves the cursor in place).  Cursor positions outside the image only
  advance the cursor.  Returns MagickFalse on pixel-access failure or when
  the progress monitor cancels; MagickTrue otherwise.
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  register CubeInfo
    *p;

  size_t
    index;

  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      register Quantum
        *magick_restrict q;

      register ssize_t
        i;

      /*
        Distribute error.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      /*
        Add the queued errors of the previous ErrorQueueLength pixels,
        weighted by the decaying factors prepared in GetCubeInfo().
      */
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=p->weights[i]*p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
        NOTE(review): source and destination overlap here; this assumes
        CopyMagickMemory() has memmove semantics for overlapping ranges —
        confirm against MagickCore/memory.c.
      */
      (void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /* Advance the Hilbert-curve cursor. */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    extent;

  size_t
    depth;

  /*
    Dispatch to Floyd-Steinberg unless a Riemersma dither was requested.
  */
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve: clear the error
    queue, reset the cursor, then choose a recursion depth whose curve
    covers the larger image dimension.
  */
  (void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength*
    sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  extent=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  depth=1;
  while (extent != 0)
  {
    extent>>=1;
    depth++;
  }
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t)
      image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception);
  /* The final step flushes the tail of the error queue at the cursor. */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
%    one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
double
sum,
weight;
register ssize_t
i;
size_t
length;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) ResetMagickMemory(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither_method == NoDitherMethod)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) ResetMagickMemory(cube_info->cache,(-1),sizeof(*cube_info->cache)*
length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
}
/*
Normalize the weighting factors.
*/
weight=0.0;
for (i=0; i < ErrorQueueLength; i++)
weight+=cube_info->weights[i];
sum=0.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]/=weight;
sum+=cube_info->weights[i];
}
cube_info->weights[0]+=1.0-sum;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  /*
    Hand out the next node from the current queue, allocating a fresh queue
    of NodesInAList nodes when the previous one is exhausted.  Returns NULL
    on allocation failure.
  */
  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /*
            Fix: free `nodes' — it is not yet linked into node_queue, so
            returning here previously leaked it.
          */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      /* Push the new queue onto the cube's queue list. */
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) ResetMagickMemory(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    index;

  ssize_t
    y;

  /*
    Sum, for every pixel, the per-channel distance between the stored pixel
    and its colormap entry; DirectClass images have no colormap so their
    error is defined as zero.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) ResetMagickMemory(&image->error,0,sizeof(image->error));
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;
  /*
    Fix: guard against an empty image; area of 0.0 previously produced
    NaN/Inf error statistics from the divisions below.
  */
  if (area == 0.0)
    area=1.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=GetPixelIndex(image,p);
      /* Weight each side by its normalized alpha when the image blends. */
      if (image->alpha_trait == BlendPixelTrait)
        {
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
/*
  Reset a caller-supplied QuantizeInfo to library defaults: 256 colors,
  Riemersma dithering, UndefinedColorspace, and no error measurement.
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  /* Zero the whole struct first so unlisted members default to 0/NULL. */
  (void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  /* Mark the struct as initialized; checked by asserts elsewhere. */
  quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
/*
  Reduce each channel to `levels' evenly spaced values, then quantize the
  result to at most levels^3 colors with the requested dither method.
  NOTE(review): levels == 0 makes (levels-1) wrap to SIZE_MAX inside
  PosterizePixel — confirm callers never pass 0.
*/
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
  QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Posterize the colormap first for PseudoClass images.
    NOTE(review): progress/status appear in this pragma's shared clause but
    are not initialized until after this loop; they are not read here —
    confirm the clause is intentional.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,1,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Only channels flagged for update (and applicable) are posterized. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait == BlendPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PosterizeImage)
#endif
        proceed=SetImageProgress(image,PosterizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* Final pass: quantize to at most levels^3 colors with dithering. */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
%  The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    n;

  size_t
    children;

  /*
    Recursively prune this node's subtree, then fold its color statistics
    into the parent and detach it from the tree.  Cube nodes have 8
    children (16 when alpha is associated).
  */
  children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (n=0; n < (ssize_t) children; n++)
  {
    if (node_info->child[n] == (NodeInfo *) NULL)
      continue;
    PruneChild(cube_info,node_info->child[n]);
  }
  /*
    Merge color statistics into parent.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.alpha+=node_info->total_color.alpha;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    n;

  size_t
    children;

  /*
    Walk the subtree depth-first and collapse every node that sits exactly
    at the cube's maximum depth into its parent.
  */
  children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (n=0; n < (ssize_t) children; n++)
  {
    if (node_info->child[n] == (NodeInfo *) NULL)
      continue;
    PruneLevel(cube_info,node_info->child[n]);
  }
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  /*
    Remove every node deeper than cube_info->depth, folding the color
    statistics of each removed node into its parent.
  */
  size_t
    children;

  ssize_t
    index;

  /*
    Recurse into all children first so the tree is pruned leaves-upward.
  */
  children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (index=0; index < (ssize_t) children; index++)
    if (node_info->child[index] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[index]);
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clamp the target palette size: 0 requests the maximum, and the size can
    never exceed MaxColormapSize.
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  if (image->alpha_trait != BlendPixelTrait)
    {
      /*
        Opaque gray image: take the direct grayscale-palette path.
      */
      if (SetImageGray(image,exception) != MagickFalse)
        (void) SetGrayscaleImage(image,exception);
    }
  if ((image->storage_class == PseudoClass) &&
      (image->colors <= maximum_colors))
    {
      /*
        Already paletted with few enough colors: only a colorspace
        transform (if requested) remains to be done.
      */
      if ((quantize_info->colorspace != UndefinedColorspace) &&
          (quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace(image,quantize_info->colorspace,
          exception);
      return(MagickTrue);
    }
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* heuristic: a shallower tree suffices when dithering is enabled */
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      /* heuristic: cap the depth when an alpha axis enlarges the cube */
      if ((image->alpha_trait == BlendPixelTrait) && (depth > 5))
        depth--;
      if (SetImageGray(image,exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image if it contains more than the
        maximum, otherwise we can disable dithering to improve the performance.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      else
        cube_info->quantize_info->dither_method=NoDitherMethod;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  /*
    Clamp the target palette size: 0 requests the maximum, never exceeding
    MaxColormapSize.
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* heuristic: a shallower tree suffices when dithering is enabled */
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  /*
    Pass 1: classify the colors of every image in the sequence into the
    shared cube.  The per-image progress monitor is suppressed so only the
    sequence-level progress is reported.
  */
  number_images=GetImageListLength(images);
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;  /* NOTE(review): monitor not restored on this path -- confirm */
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      /*
        Pass 2: assign the shared colormap to every image in the sequence.
      */
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
register ssize_t
i;
size_t
n,
number_children;
if (offset >= (ssize_t) cube_info->nodes)
return(0);
quantize_error[offset]=node_info->quantize_error;
n=1;
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children ; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
quantize_error);
return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  /*
    Prune every node whose quantization error is at or below the current
    pruning threshold, and track both the number of surviving color nodes
    and the smallest surviving error (the next pruning threshold).
  */
  size_t
    children;

  ssize_t
    index;

  /*
    Recurse into all children first so pruning proceeds leaves-upward.
  */
  children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (index=0; index < (ssize_t) children; index++)
    if (node_info->child[index] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[index]);
  if (node_info->quantize_error > cube_info->pruning_threshold)
    {
      /*
        Survivor: count it if it defines a unique color, and lower the next
        threshold to the minimum surviving error.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
  else
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  /*
    qsort() comparator for the flattened quantization-error array: values
    within MagickEpsilon of each other compare equal, otherwise order
    ascending.

    Fix: the epsilon band is tested BEFORE the greater-than test.  The
    previous order (*p > *q first) made the comparator asymmetric for
    values within MagickEpsilon -- compare(a,b) could return 1 while
    compare(b,a) returned 0 -- which is an inconsistent ordering and
    undefined behavior for qsort().
  */
  double
    *p,
    *q;

  p=(double *) error_p;
  q=(double *) error_q;
  if (fabs(*q-*p) <= MagickEpsilon)
    return(0);
  if (*p > *q)
    return(1);
  return(-1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag  "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      double
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors: flatten and
        sort all node errors, then seed the pruning threshold so the first
        Reduce() pass already removes most excess nodes.
      */
      quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (double *) NULL)
        {
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(double),
            QuantizeErrorCompare);
          /*
            Pick the error value that leaves ~110% of the requested colors,
            so the iterative loop below only has to trim the remainder.
          */
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(double *) RelinquishMagickMemory(quantize_error);
        }
  }
  /*
    Repeatedly prune nodes whose error is at or below the threshold until
    the surviving color count fits the requested maximum.  Reduce() resets
    cube_info->colors and computes the next threshold on each pass.
  */
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Build the color cube from the reference image, then assign its colors
    to the target image.
  */
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  /* NOTE(review): quantize_info is dereferenced below but, unlike
     RemapImage, is not asserted non-NULL here -- confirm intent. */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images,exception);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Assign the reference palette to every image in the sequence.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  /*
    qsort() comparator ordering colormap entries by ascending intensity.

    Fix: the previous version returned (int) of the raw double difference,
    which truncates any difference with magnitude < 1.0 to 0.  That makes
    distinct but close intensities compare "equal" and the ordering
    intransitive (a~b, b~c, yet a<c), which is undefined behavior for
    qsort().  Return the sign of the difference instead.
  */
  double
    intensity;

  PixelInfo
    *color_1,
    *color_2;

  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (intensity < 0.0)
    return(-1);
  if (intensity > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;

  register ssize_t
    i;

  ssize_t
    *colormap_index,
    j,
    y;

  /*
    Convert image to a PseudoClass grayscale image: collect the distinct
    intensities into a colormap, sort it by intensity, collapse duplicate
    entries, and remap every pixel index to the compacted colormap.

    Fix: colormap_index was leaked on the two error paths below that throw
    after it has been allocated; it is now released before each throw.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        DirectClass: discover the distinct intensities.  Filling with -1
        bytes yields -1 in every ssize_t slot (two's complement), meaning
        "intensity not seen yet".
      */
      (void) ResetMagickMemory(colormap_index,(-1),MaxColormapSize*
        sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;

          intensity=ScaleQuantumToMap(GetPixelRed(image,q));
          if (colormap_index[intensity] < 0)
            {
              /*
                Re-check inside the critical section so only one thread
                registers a newly seen intensity.
              */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=(double)
                    GetPixelRed(image,q);
                  image->colormap[image->colors].green=(double)
                    GetPixelGreen(image,q);
                  image->colormap[image->colors].blue=(double)
                    GetPixelBlue(image,q);
                  image->colors++;
                }
            }
          SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Stash each entry's original colormap slot in its alpha channel so the
    pixel remap below can follow the sort.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
    IntensityCompare);
  colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
  if (colormap == (PixelInfo *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Merge adjacent duplicate entries; colormap_index maps each original
    colormap slot (recovered from the alpha channel) to its compacted index.
  */
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  /*
    Remap every pixel index through colormap_index to the compacted map.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
|
nested.c |
// OpenMP Nested Example
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main( int argc, char** argv ) {
  // Demonstrates two-level nested OpenMP parallelism: a 2-thread outer team
  // whose members each fork a 2-thread inner team (4 inner prints total).
  // NOTE(review): omp_set_nested() is deprecated since OpenMP 5.0 in favor
  // of omp_set_max_active_levels() -- confirm the target OpenMP version.
  omp_set_nested( 1 );  // Enable Nested Parallelism
  omp_set_dynamic( 0 ); // Disable Dynamic Threads
  // Outer Level Parallel Region - 2 Threads
  #pragma omp parallel num_threads( 2 )
  {
    printf( "Outer Level - You will see this twice.\n" );
    // Inner Level Parallel Region - 2 Threads Each
    #pragma omp parallel num_threads( 2 )
    {
      printf( "Inner Level - You will see this four times!\n" );
    }
  }
  return 0;
}
// End nested.c - EWG SDG
|
8700.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Array initialization: A[i][j] = (i + j) / nj, in [0, ~2). */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int row, col;

  for (row = 0; row < ni; row++)
    for (col = 0; col < nj; col++)
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump B to stderr so the live-out data cannot be dead-code eliminated;
   the output can also be diffed to check correctness. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      /* break output into lines of 20 values (flat index uses static NJ) */
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: 3x3 2D convolution over the interior of A,
   writing B.  The whole function is timed, including call and return. */
static
void kernel_conv2d(int ni,
		   int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  /*
    Fix: removed the leftover auto-tuning template pragma whose
    placeholders (collapse(#P12), schedule(#P9, #P11), num_threads(#P11))
    were never expanded and do not compile under -fopenmp.  Also added
    private(j) to the surviving pragma: j is declared outside the loop, so
    it was implicitly shared across threads -- a data race.  (i, as the
    parallelized loop variable, is private automatically.)
  */
#pragma omp parallel for private(j) num_threads(2)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
	      + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
	      + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
GB_binop__land_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_int8)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__land_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__land_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int8)
// A*D function (colscale): GB (_AxD__land_int8)
// D*A function (rowscale): GB (_DxB__land_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__land_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__land_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int8)
// C=scalar+B GB (_bind1st__land_int8)
// C=scalar+B' GB (_bind1st_tran__land_int8)
// C=A+scalar GB (_bind2nd__land_int8)
// C=A'+scalar GB (_bind2nd_tran__land_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT8 || GxB_NO_LAND_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B: all three matrices dense, no accumulator, LAND (int8) operator.
GrB_Info GB (_Cdense_ewise3_noaccum__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;     // operator compiled out via GxB_NO_* flags
    #else
    // the template supplies the parallel elementwise loop
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, LAND (int8).
GrB_Info GB (_Cdense_accumB__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;     // operator compiled out via GxB_NO_* flags
    #else
    {
        // the template implements the ek-sliced parallel accumulation
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C, LAND (int8).
GrB_Info GB (_Cdense_accumb__land_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;     // operator compiled out via GxB_NO_* flags
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above always returns; a
    // harmless artifact of the code generator (file is auto-generated).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, LAND (int8).
GrB_Info GB (_AxD__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;     // operator compiled out via GxB_NO_* flags
    #else
    // Cx is the numeric array of C, written by the colscale template
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, LAND (int8).
GrB_Info GB (_DxB__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;     // operator compiled out via GxB_NO_* flags
    #else
    // Cx is the numeric array of C, written by the rowscale template
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the LAND (int8) operator.
GrB_Info GB (_AaddB__land_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;     // operator compiled out via GxB_NO_* flags
    #else
    // ek-slicing workspaces for M, A, and B; released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the pattern intersection,
// combining entries with LAND on int8 (method 01 of GB_emult).
GrB_Info GB (_AemultB_01__land_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. The GB_FLIPPED macro selects fmult(x,y) vs fmult(y,x)
// inside the template.
GrB_Info GB (_AemultB_02__land_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; iterates over the entries of M.
GrB_Info GB (_AemultB_03__land_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B,
// with the LAND operator on int8 values.
GrB_Info GB (_AemultB_bitmap__land_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x LAND Bx [p]) for every entry present in B, with the scalar x
// bound as the first operand.  Cx and Bx may be aliased; each element is
// read before its slot is written, so aliasing is safe.
GrB_Info GB (_bind1st__land_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // GBB tests the bitmap; entries absent from B are skipped
        if (GBB (Bb, p))
        {
            int8_t b_ij = Bx [p] ;
            // z = x && b, both treated as booleans
            Cx [p] = ((x != 0) && (b_ij != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] LAND y) for every entry present in A, with the scalar y
// bound as the second operand.  Cx and Ax may be aliased; each element is
// read before its slot is written, so aliasing is safe.
GrB_Info GB (_bind2nd__land_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // GBB tests the bitmap; entries absent from A are skipped
        if (GBB (Ab, p))
        {
            int8_t a_ij = Ax [p] ;
            // z = a && y, both treated as booleans
            Cx [p] = ((a_ij != 0) && (y != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply z = (x LAND aij). GB_CAST_OP is
// consumed by GB_unop_transpose.c; GB_ATYPE is temporarily redefined since
// A is the second operand here, then restored after the function.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__land_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code generated after this kernel
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// C = op (A', y): transpose A and apply z = (aij LAND y). GB_CAST_OP is
// consumed by GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__land_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
utils.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
#pragma once
#include <fcntl.h>
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>
#include <memory>
#include <random>
#include <set>
#include <xmmintrin.h>
#ifdef __APPLE__
#else
#include <malloc.h>
#endif
#ifdef _WINDOWS
#include <Windows.h>
typedef HANDLE FileHandle;
#else
#include <unistd.h>
typedef int FileHandle;
#endif
#include "logger.h"
#include "cached_io.h"
#include "common_includes.h"
#include "windows_customizations.h"
#ifdef EXEC_ENV_OLS
#include "content_buf.h"
#include "memory_mapped_files.h"
#endif
// taken from
// https://github.com/Microsoft/BLAS-on-flash/blob/master/include/utils.h
// round up X to the nearest multiple of Y
#define ROUND_UP(X, Y) \
((((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0)) * (Y))
#define DIV_ROUND_UP(X, Y) (((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0))
// round down X to the nearest multiple of Y
#define ROUND_DOWN(X, Y) (((uint64_t)(X) / (Y)) * (Y))
// alignment tests
#define IS_ALIGNED(X, Y) ((uint64_t)(X) % (uint64_t)(Y) == 0)
#define IS_512_ALIGNED(X) IS_ALIGNED(X, 512)
#define IS_4096_ALIGNED(X) IS_ALIGNED(X, 4096)
typedef uint64_t _u64;
typedef int64_t _s64;
typedef uint32_t _u32;
typedef int32_t _s32;
typedef uint16_t _u16;
typedef int16_t _s16;
typedef uint8_t _u8;
typedef int8_t _s8;
namespace diskann {
static const size_t MAX_SIZE_OF_STREAMBUF = 2LL * 1024 * 1024 * 1024;
enum Metric { L2 = 0, INNER_PRODUCT = 1, FAST_L2 = 2, PQ = 3 };
// Allocate `size` bytes aligned to `align` bytes and store the result in
// *ptr.  `size` must be a multiple of `align` (a requirement of
// aligned_alloc); both this precondition and allocation success are
// enforced with assert.
inline void alloc_aligned(void** ptr, size_t size, size_t align) {
  *ptr = nullptr;
  assert(size % align == 0);  // same condition IS_ALIGNED(size, align) checks
#ifndef _WINDOWS
  *ptr = ::aligned_alloc(align, size);
#else
  // Windows takes (size, alignment) -- the argument order is swapped.
  *ptr = ::_aligned_malloc(size, align);
#endif
  assert(*ptr != nullptr);
}
// Release memory obtained from alloc_aligned; nullptr is a no-op.
inline void aligned_free(void* ptr) {
  // Gopal. Must have a check here if the pointer was actually allocated by
  // _alloc_aligned
  if (ptr != nullptr) {
#ifndef _WINDOWS
    free(ptr);
#else
    ::_aligned_free(ptr);
#endif
  }
}
// Fill addr[0..size) with `size` distinct pseudo-random values in [0, N):
// draw into [0, N-size), sort, bump duplicates to make the sequence
// strictly increasing, then rotate everything by a random offset mod N.
// Assumes size < N (N == size would take rng() % 0, which is UB) -- the
// callers are responsible for this; TODO confirm.
inline void GenRandom(std::mt19937& rng, unsigned* addr, unsigned size,
unsigned N) {
for (unsigned i = 0; i < size; ++i) {
addr[i] = rng() % (N - size);
}
std::sort(addr, addr + size);
// make values strictly increasing; after bumping, the largest value is
// at most N-2, so the set stays inside [0, N)
for (unsigned i = 1; i < size; ++i) {
if (addr[i] <= addr[i - 1]) {
addr[i] = addr[i - 1] + 1;
}
}
// random cyclic shift keeps the values distinct modulo N
unsigned off = rng() % N;
for (unsigned i = 0; i < size; ++i) {
addr[i] = (addr[i] + off) % N;
}
}
// get_bin_metadata functions START
// Read the two 32-bit header fields (#rows, #cols) of a DiskANN .bin
// stream and widen them into the size_t outputs.
inline void get_bin_metadata_impl(std::basic_istream<char>& reader,
                                  size_t& nrows, size_t& ncols) {
  int header[2];
  reader.read(reinterpret_cast<char*>(&header[0]), sizeof(int));
  reader.read(reinterpret_cast<char*>(&header[1]), sizeof(int));
  nrows = header[0];
  ncols = header[1];
}
#ifdef EXEC_ENV_OLS
// OLS variant: read the (#rows, #cols) header from an in-memory mapped
// file instead of the filesystem.
inline void get_bin_metadata(MemoryMappedFiles& files,
const std::string& bin_file, size_t& nrows,
size_t& ncols) {
diskann::cout << "Getting metadata for file: " << bin_file << std::endl;
auto fc = files.getContent(bin_file);
// wrap the raw mapped bytes in a streambuf so the shared impl can be used
auto cb = ContentBuf((char*) fc._content, fc._size);
std::basic_istream<char> reader(&cb);
get_bin_metadata_impl(reader, nrows, ncols);
}
#endif
// Read the (#rows, #cols) header of the .bin file at `bin_file`.
inline void get_bin_metadata(const std::string& bin_file, size_t& nrows,
                             size_t& ncols) {
  std::ifstream reader;
  reader.open(bin_file.c_str(), std::ios::binary);
  get_bin_metadata_impl(reader, nrows, ncols);
}
// get_bin_metadata functions END
// Render the first `num` elements of `data` as "[v0,v1,...,]\n" for
// logging/debug output.
template<typename T>
inline std::string getValues(T* data, size_t num) {
  std::stringstream out;
  out << "[";
  for (size_t idx = 0; idx < num; idx++)
    out << std::to_string(data[idx]) << ",";
  out << "]" << std::endl;
  return out.str();
}
// load_bin functions START
// Read a DiskANN .bin stream: int32 npts, int32 dim, then npts*dim values
// of type T. Allocates `data` with new[] -- the caller owns the buffer and
// must delete[] it. Throws ANNException when `actual_file_size` disagrees
// with the size implied by the header.
template<typename T>
inline void load_bin_impl(std::basic_istream<char>& reader,
size_t actual_file_size, T*& data, size_t& npts,
size_t& dim) {
int npts_i32, dim_i32;
reader.read((char*) &npts_i32, sizeof(int));
reader.read((char*) &dim_i32, sizeof(int));
npts = (unsigned) npts_i32;
dim = (unsigned) dim_i32;
diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..."
<< std::endl;
// expected stream length: payload plus the two 32-bit header words
size_t expected_actual_file_size =
npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
if (actual_file_size != expected_actual_file_size) {
std::stringstream stream;
stream << "Error. File size mismatch. Actual size is " << actual_file_size
<< " while expected size is " << expected_actual_file_size
<< " npts = " << npts << " dim = " << dim
<< " size of <T>= " << sizeof(T) << std::endl;
diskann::cout << stream.str();
throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
__LINE__);
}
data = new T[npts * dim];
reader.read((char*) data, npts * dim * sizeof(T));
// diskann::cout << "Last bytes: "
// << getValues<T>(data + (npts - 2) * dim, dim);
// diskann::cout << "Finished reading bin file." << std::endl;
}
#ifdef EXEC_ENV_OLS
// OLS variant of load_bin: reads the header from the memory-mapped file
// content and, on success, points `data` INTO the mapping (zero-copy).
// The returned pointer is NOT owned by the caller -- it aliases fc._content
// and must not be delete[]d.
template<typename T>
inline void load_bin(MemoryMappedFiles& files, const std::string& bin_file,
T*& data, size_t& npts, size_t& dim) {
diskann::cout << "Reading bin file " << bin_file.c_str() << " ..."
<< std::endl;
auto fc = files.getContent(bin_file);
uint32_t t_npts, t_dim;
// the first two 32-bit words of the mapping are the header
uint32_t* contentAsIntPtr = (uint32_t*) (fc._content);
t_npts = *(contentAsIntPtr);
t_dim = *(contentAsIntPtr + 1);
npts = t_npts;
dim = t_dim;
// validate the mapped size against the size implied by the header
auto actual_file_size = npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
if (actual_file_size != fc._size) {
std::stringstream stream;
stream << "Error. File size mismatch. Actual size is " << fc._size
<< " while expected size is " << actual_file_size
<< " npts = " << npts << " dim = " << dim
<< " size of <T>= " << sizeof(T) << std::endl;
diskann::cout << stream.str();
throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
__LINE__);
}
data =
(T*) ((char*) fc._content + 2 * sizeof(uint32_t)); // No need to copy!
}
#endif
// Debugging aid: block until the user types a number on stdin.
inline void wait_for_keystroke() {
  std::cout << "Press any number to continue.." << std::endl;
  int dummy;
  std::cin >> dummy;
}
// Load a DiskANN .bin file from disk into a new[]-allocated buffer; see
// load_bin_impl for the format and the caller-owns-delete[] contract.
template<typename T>
inline void load_bin(const std::string& bin_file, T*& data, size_t& npts,
                     size_t& dim) {
  diskann::cout << "Reading bin file " << bin_file.c_str() << " ..."
                << std::endl;
  // open at the end so tellg() yields the file size for validation
  std::ifstream reader(bin_file, std::ios::binary | std::ios::ate);
  const uint64_t file_size = reader.tellg();
  reader.seekg(0, std::ios::beg);
  load_bin_impl<T>(reader, file_size, data, npts, dim);
}
// load_bin functions END
// Load a ground-truth file: header (npts, ngt), then npts*ngt uint32 ids,
// optionally followed by npts*ngt float distances. `ids` (and `dists`
// when present) are new[]-allocated and owned by the caller. When the
// file holds only ids, `dists` is left untouched.
inline void load_truthset(const std::string& bin_file, uint32_t*& ids,
float*& dists, size_t& npts, size_t& dim) {
_u64 read_blk_size = 64 * 1024 * 1024;
cached_ifstream reader(bin_file, read_blk_size);
diskann::cout << "Reading truthset file " << bin_file.c_str() << " ..."
<< std::endl;
size_t actual_file_size = reader.get_file_size();
int npts_i32, dim_i32;
reader.read((char*) &npts_i32, sizeof(int));
reader.read((char*) &dim_i32, sizeof(int));
npts = (unsigned) npts_i32;
dim = (unsigned) dim_i32;
diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..."
<< std::endl;
int truthset_type = -1; // 1 means truthset has ids and distances, 2 means
// only ids, -1 is error
// decide the layout from the total file size
size_t expected_file_size_with_dists =
2 * npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
if (actual_file_size == expected_file_size_with_dists)
truthset_type = 1;
size_t expected_file_size_just_ids =
npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
if (actual_file_size == expected_file_size_just_ids)
truthset_type = 2;
if (truthset_type == -1) {
std::stringstream stream;
stream << "Error. File size mismatch. File should have bin format, with "
"npts followed by ngt followed by npts*ngt ids and optionally "
"followed by npts*ngt distance values; actual size: "
<< actual_file_size
<< ", expected: " << expected_file_size_with_dists << " or "
<< expected_file_size_just_ids;
diskann::cout << stream.str();
throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
__LINE__);
}
ids = new uint32_t[npts * dim];
reader.read((char*) ids, npts * dim * sizeof(uint32_t));
if (truthset_type == 1) {
dists = new float[npts * dim];
reader.read((char*) dists, npts * dim * sizeof(float));
}
}
// Load a truthset that must contain both ids and distances, and keep for
// each query only the neighbor ids whose distance is <= range. Also logs
// the global min/max distance. The temporary id/dist buffers are freed
// before returning. A file with ids only fails the size check and throws.
inline void prune_truthset_for_range(const std::string& bin_file, float range, std::vector<std::vector<_u32>> &groundtruth,
size_t& npts) {
_u64 read_blk_size = 64 * 1024 * 1024;
cached_ifstream reader(bin_file, read_blk_size);
diskann::cout << "Reading truthset file " << bin_file.c_str() << " ..."
<< std::endl;
size_t actual_file_size = reader.get_file_size();
int npts_i32, dim_i32;
reader.read((char*) &npts_i32, sizeof(int));
reader.read((char*) &dim_i32, sizeof(int));
npts = (unsigned) npts_i32;
_u64 dim = (unsigned) dim_i32;
_u32* ids;
float* dists;
diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..."
<< std::endl;
int truthset_type = -1; // 1 means truthset has ids and distances, 2 means
// only ids, -1 is error
size_t expected_file_size_with_dists =
2 * npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
if (actual_file_size == expected_file_size_with_dists)
truthset_type = 1;
// any other size (including ids-only files) is rejected below, so after
// this check truthset_type is guaranteed to be 1 and dists is allocated
if (truthset_type == -1) {
std::stringstream stream;
stream << "Error. File size mismatch. File should have bin format, with "
"npts followed by ngt followed by npts*ngt ids and optionally "
"followed by npts*ngt distance values; actual size: "
<< actual_file_size
<< ", expected: " << expected_file_size_with_dists;
diskann::cout << stream.str();
throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
__LINE__);
}
ids = new uint32_t[npts * dim];
reader.read((char*) ids, npts * dim * sizeof(uint32_t));
if (truthset_type == 1) {
dists = new float[npts * dim];
reader.read((char*) dists, npts * dim * sizeof(float));
}
float min_dist = std::numeric_limits<float>::max();
float max_dist = 0;
groundtruth.resize(npts);
for (_u32 i = 0; i < npts; i++) {
groundtruth[i].clear();
for (_u32 j = 0; j < dim; j++) {
// keep only neighbors within the query radius
if (dists[i*dim + j] <= range) {
groundtruth[i].emplace_back(ids[i*dim+j]);
}
min_dist = min_dist > dists[i*dim+j] ? dists[i*dim + j] : min_dist;
max_dist = max_dist < dists[i*dim+j] ? dists[i*dim + j] : max_dist;
}
//std::cout<<groundtruth[i].size() << " " ;
}
std::cout<<"Min dist: " << min_dist <<", Max dist: "<< max_dist << std::endl;
delete[] ids;
delete[] dists;
}
// Load a variable-length range-search truthset: header (npts,
// total_results), then npts uint32 per-query counts, then the
// concatenated neighbor-id lists. Fills groundtruth[i] with the ids for
// query i and prints count percentiles.
// NOTE(review): gt_stats[gt_num - 1] underflows if the file declares zero
// queries -- presumably never the case; verify against producers.
inline void load_range_truthset(const std::string& bin_file, std::vector<std::vector<_u32>> &groundtruth, _u64 & gt_num) {
_u64 read_blk_size = 64 * 1024 * 1024;
cached_ifstream reader(bin_file, read_blk_size);
diskann::cout << "Reading truthset file " << bin_file.c_str() << " ..."
<< std::endl;
size_t actual_file_size = reader.get_file_size();
int npts_u32, total_u32;
reader.read((char*) &npts_u32, sizeof(int));
reader.read((char*) &total_u32, sizeof(int));
gt_num = (_u64) npts_u32;
_u64 total_res = (_u64) total_u32;
diskann::cout << "Metadata: #pts = " << gt_num << ", #total_results = " << total_res << "..."
<< std::endl;
// header + per-query counts + all neighbor ids
size_t expected_file_size =
2*sizeof(_u32) + gt_num*sizeof(_u32) + total_res*sizeof(_u32);
if (actual_file_size != expected_file_size) {
std::stringstream stream;
stream << "Error. File size mismatch in range truthset. actual size: "
<< actual_file_size
<< ", expected: " << expected_file_size;
diskann::cout << stream.str();
throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
__LINE__);
}
groundtruth.clear();
groundtruth.resize(gt_num);
std::vector<_u32> gt_count(gt_num);
reader.read((char*) gt_count.data(), sizeof(_u32)*gt_num);
// report the distribution of result-counts per query
std::vector<_u32> gt_stats(gt_count);
std::sort(gt_stats.begin(), gt_stats.end());
std::cout<<"GT count percentiles:" << std::endl;
for (_u32 p = 0; p < 100; p += 5)
std::cout << "percentile " << p << ": "
<< gt_stats[std::floor((p / 100.0) * gt_num)] << std::endl;
std::cout << "percentile 100"
<< ": " << gt_stats[gt_num - 1] << std::endl;
for (_u32 i = 0; i < gt_num; i++) {
groundtruth[i].clear();
groundtruth[i].resize(gt_count[i]);
if (gt_count[i]!=0)
reader.read((char*) groundtruth[i].data(), sizeof(_u32)*gt_count[i]);
// debugging code
/* if (i < 10) {
std::cout<<gt_count[i] <<" nbrs, ids: ";
for (auto &x : groundtruth[i])
std::cout<<x <<" ";
std::cout<<std::endl;
} */
}
}
#ifdef EXEC_ENV_OLS
// unique_ptr overload for the OLS path. The raw-pointer overload above
// returns a pointer ALIASING the memory-mapped file content (it does not
// allocate -- see its "No need to copy!" comment), so handing that pointer
// to unique_ptr<T[]> would make the destructor delete[] memory that was
// never new[]-ed (undefined behavior). Copy into an owned buffer instead
// so the unique_ptr's ownership contract holds.
template<typename T>
inline void load_bin(MemoryMappedFiles& files, const std::string& bin_file,
                     std::unique_ptr<T[]>& data, size_t& npts, size_t& dim) {
  T* ptr;
  load_bin<T>(files, bin_file, ptr, npts, dim);
  std::unique_ptr<T[]> owned(new T[npts * dim]);
  std::memcpy(owned.get(), ptr, npts * dim * sizeof(T));
  data = std::move(owned);
}
#endif
// unique_ptr overload: the raw-pointer loader allocates with new[], so the
// smart pointer can safely adopt the buffer.
template<typename T>
inline void load_bin(const std::string& bin_file, std::unique_ptr<T[]>& data,
                     size_t& npts, size_t& dim) {
  T* raw = nullptr;
  load_bin<T>(bin_file, raw, npts, dim);
  data.reset(raw);
}
// Write `data` (npts x ndims, row-major) to `filename` in DiskANN .bin
// format: two int32 header fields followed by the raw matrix payload.
template<typename T>
inline void save_bin(const std::string& filename, T* data, size_t npts,
                     size_t ndims) {
  std::ofstream writer(filename, std::ios::binary | std::ios::out);
  diskann::cout << "Writing bin: " << filename.c_str() << std::endl;
  const int npts_i32 = (int) npts;
  const int ndims_i32 = (int) ndims;
  writer.write((char*) &npts_i32, sizeof(int));
  writer.write((char*) &ndims_i32, sizeof(int));
  diskann::cout << "bin: #pts = " << npts << ", #dims = " << ndims
                << ", size = " << npts * ndims * sizeof(T) + 2 * sizeof(int)
                << "B" << std::endl;
  writer.write((char*) data, npts * ndims * sizeof(T));
  writer.close();
  diskann::cout << "Finished writing bin." << std::endl;
}
// load_aligned_bin functions START
// Read a .bin stream into an aligned buffer: dim is rounded up to a
// multiple of 8 and each point is zero-padded to rounded_dim. The buffer
// is allocated with alloc_aligned (alignment 8*sizeof(T)); the caller must
// release it with aligned_free, not delete[]. Throws ANNException on a
// header/size mismatch.
template<typename T>
inline void load_aligned_bin_impl(std::basic_istream<char>& reader,
size_t actual_file_size, T*& data,
size_t& npts, size_t& dim,
size_t& rounded_dim) {
int npts_i32, dim_i32;
reader.read((char*) &npts_i32, sizeof(int));
reader.read((char*) &dim_i32, sizeof(int));
npts = (unsigned) npts_i32;
dim = (unsigned) dim_i32;
//check file size
size_t expected_actual_file_size =
npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
if (actual_file_size != expected_actual_file_size) {
std::stringstream stream;
stream << "Error. File size mismatch. Actual size is " << actual_file_size
<< " while expected size is " << expected_actual_file_size
<< " npts = " << npts << " dim = " << dim
<< " size of <T>= " << sizeof(T) << std::endl;
diskann::cout << stream.str() << std::endl;
throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
__LINE__);
}
// pad each vector to a multiple of 8 elements for SIMD-friendly layout
rounded_dim = ROUND_UP(dim, 8);
diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim
<< ", aligned_dim = " << rounded_dim << "..." << std::flush;
size_t allocSize = npts * rounded_dim * sizeof(T);
diskann::cout << "allocating aligned memory, " << allocSize << " bytes..."
<< std::flush;
alloc_aligned(((void**) &data), allocSize, 8 * sizeof(T));
diskann::cout << "done. Copying data..." << std::flush;
for (size_t i = 0; i < npts; i++) {
reader.read((char*) (data + i * rounded_dim), dim * sizeof(T));
memset(data + i * rounded_dim + dim, 0, (rounded_dim - dim) * sizeof(T));// eliminate the holes
}
diskann::cout << " done." << std::endl;
}
#ifdef EXEC_ENV_OLS
// OLS variant: stream the aligned load out of a memory-mapped file via a
// ContentBuf-backed istream; same contract as load_aligned_bin_impl.
template<typename T>
inline void load_aligned_bin(MemoryMappedFiles& files,
const std::string& bin_file, T*& data,
size_t& npts, size_t& dim, size_t& rounded_dim) {
diskann::cout << "Reading bin file " << bin_file << " ..." << std::flush;
FileContent fc = files.getContent(bin_file);
ContentBuf buf((char*) fc._content, fc._size);
std::basic_istream<char> reader(&buf);
size_t actual_file_size = fc._size;
load_aligned_bin_impl(reader, actual_file_size, data, npts, dim,
rounded_dim);
}
#endif
// Load a .bin file from disk into an aligned, zero-padded buffer; see
// load_aligned_bin_impl for the layout and the aligned_free contract.
template<typename T>
inline void load_aligned_bin(const std::string& bin_file, T*& data,
                             size_t& npts, size_t& dim, size_t& rounded_dim) {
  diskann::cout << "Reading bin file " << bin_file << " ..." << std::flush;
  // open at the end so tellg() yields the file size for validation
  std::ifstream reader(bin_file, std::ios::binary | std::ios::ate);
  const uint64_t file_size = reader.tellg();
  reader.seekg(0, std::ios::beg);
  load_aligned_bin_impl(reader, file_size, data, npts, dim, rounded_dim);
}
// Element-wise cast of a row-major npts x dim matrix from InType to
// OutType; rows are processed in parallel when OpenMP is enabled.
template<typename InType, typename OutType>
void convert_types(const InType* srcmat, OutType* destmat, size_t npts,
                   size_t dim) {
#pragma omp parallel for schedule(static, 65536)
  for (int64_t row = 0; row < (int64_t) npts; row++) {
    const InType* src_row = srcmat + row * dim;
    OutType*      dst_row = destmat + row * dim;
    for (uint64_t col = 0; col < dim; col++)
      dst_row[col] = (OutType) src_row[col];
  }
}
// this function will take in_file of n*d dimensions and save the output as a
// floating point matrix
// with n*(d+1) dimensions. All vectors are scaled by a large value M so that
// the norms are <=1 and the final coordinate is set so that the resulting
// norm (in d+1 coordinates) is equal to 1 this is a classical transformation
// from MIPS to L2 search from "On Symmetric and Asymmetric LSHs for Inner
// Product Search" by Neyshabur and Srebro
// Transform a base file for MIPS-as-L2 search (Neyshabur & Srebro): scale
// all vectors by the max norm M so norms are <= 1, append one extra
// coordinate sqrt(1 - |x|^2/M^2) so every output vector has unit norm,
// and write the result as float with d+1 dims. Two passes over the input:
// pass 1 accumulates squared norms, pass 2 writes scaled vectors.
// Returns M (needed to post-process query results).
template<typename T>
float prepare_base_for_inner_products(const std::string in_file,
const std::string out_file) {
std::cout << "Pre-processing base file by adding extra coordinate"
<< std::endl;
std::ifstream in_reader(in_file.c_str(), std::ios::binary);
std::ofstream out_writer(out_file.c_str(), std::ios::binary);
_u64 npts, in_dims, out_dims;
float max_norm = 0;
_u32 npts32, dims32;
in_reader.read((char*) &npts32, sizeof(uint32_t));
in_reader.read((char*) &dims32, sizeof(uint32_t));
npts = npts32;
in_dims = dims32;
out_dims = in_dims + 1;
_u32 outdims32 = (_u32) out_dims;
out_writer.write((char*) &npts32, sizeof(uint32_t));
out_writer.write((char*) &outdims32, sizeof(uint32_t));
// process the input in blocks of at most 100k points
size_t BLOCK_SIZE = 100000;
size_t block_size = npts <= BLOCK_SIZE ? npts : BLOCK_SIZE;
std::unique_ptr<T[]> in_block_data =
std::make_unique<T[]>(block_size * in_dims);
std::unique_ptr<float[]> out_block_data =
std::make_unique<float[]>(block_size * out_dims);
std::memset(out_block_data.get(), 0, sizeof(float) * block_size * out_dims);
_u64 num_blocks = DIV_ROUND_UP(npts, block_size);
std::vector<float> norms(npts, 0);
// pass 1: squared norm of every point, tracking the maximum
for (_u64 b = 0; b < num_blocks; b++) {
_u64 start_id = b * block_size;
_u64 end_id = (b + 1) * block_size < npts ? (b + 1) * block_size : npts;
_u64 block_pts = end_id - start_id;
in_reader.read((char*) in_block_data.get(),
block_pts * in_dims * sizeof(T));
for (_u64 p = 0; p < block_pts; p++) {
for (_u64 j = 0; j < in_dims; j++) {
norms[start_id + p] +=
in_block_data[p * in_dims + j] * in_block_data[p * in_dims + j];
}
max_norm =
max_norm > norms[start_id + p] ? max_norm : norms[start_id + p];
}
}
// norms[] stays squared; max_norm becomes the true (unsquared) maximum
max_norm = std::sqrt(max_norm);
// pass 2: rewind past the header and emit scaled, augmented vectors
in_reader.seekg(2 * sizeof(_u32), std::ios::beg);
for (_u64 b = 0; b < num_blocks; b++) {
_u64 start_id = b * block_size;
_u64 end_id = (b + 1) * block_size < npts ? (b + 1) * block_size : npts;
_u64 block_pts = end_id - start_id;
in_reader.read((char*) in_block_data.get(),
block_pts * in_dims * sizeof(T));
for (_u64 p = 0; p < block_pts; p++) {
for (_u64 j = 0; j < in_dims; j++) {
out_block_data[p * out_dims + j] =
in_block_data[p * in_dims + j] / max_norm;
}
// clamp guards against tiny negative values from rounding
float res = 1 - (norms[start_id + p] / (max_norm * max_norm));
res = res <= 0 ? 0 : std::sqrt(res);
out_block_data[p * out_dims + out_dims - 1] = res;
}
out_writer.write((char*) out_block_data.get(),
block_pts * out_dims * sizeof(float));
}
out_writer.close();
return max_norm;
}
// plain saves data as npts X ndims array into filename
// Save npts x ndims data in *vecs format: each point is written as a
// uint32 dimension count followed by its ndims values of type T.
template<typename T>
void save_Tvecs(const char* filename, T* data, size_t npts, size_t ndims) {
std::string fname(filename);
// create cached ofstream with 64MB cache
cached_ofstream writer(fname, 64 * 1048576);
unsigned dims_u32 = (unsigned) ndims;
// start writing
for (uint64_t i = 0; i < npts; i++) {
// write dims in u32
writer.write((char*) &dims_u32, sizeof(unsigned));
// get cur point in data
T* cur_pt = data + i * ndims;
writer.write((char*) cur_pt, ndims * sizeof(T));
}
}
// NOTE :: good efficiency when total_vec_size is integral multiple of 64
// Prefetch `vecsize` bytes at `vec` into all cache levels (T0 hint), one
// 64-byte cache line at a time; a trailing partial line is skipped.
inline void prefetch_vector(const char* vec, size_t vecsize) {
  const size_t whole_lines = vecsize - vecsize % 64;
  for (size_t off = 0; off < whole_lines; off += 64)
    _mm_prefetch(vec + off, _MM_HINT_T0);
}
// NOTE :: good efficiency when total_vec_size is integral multiple of 64
// Prefetch `vecsize` bytes at `vec` with the T1 hint (L2 and above), one
// 64-byte cache line at a time; a trailing partial line is skipped.
inline void prefetch_vector_l2(const char* vec, size_t vecsize) {
  const size_t whole_lines = vecsize - vecsize % 64;
  for (size_t off = 0; off < whole_lines; off += 64)
    _mm_prefetch(vec + off, _MM_HINT_T1);
}
}; // namespace diskann
// Pairs a pivot id with its distance. The comparisons are deliberately
// inverted: an element is "less" when its distance is LARGER, so a
// max-heap ordered by operator< surfaces the smallest distance first.
struct PivotContainer {
  PivotContainer() = default;
  PivotContainer(size_t pivo_id, float pivo_dist)
      : piv_id{pivo_id}, piv_dist{pivo_dist} {
  }
  bool operator<(const PivotContainer& p) const {
    return p.piv_dist < piv_dist;
  }
  // a > b exactly when b < a (strict float comparison, args swapped)
  bool operator>(const PivotContainer& p) const {
    return p < *this;
  }
  size_t piv_id;
  float piv_dist;
};
// True when stat() succeeds for `name`; the stat return value is logged.
inline bool file_exists(const std::string& name) {
  struct stat buffer;
  int val = stat(name.c_str(), &buffer);
  diskann::cout << " Stat(" << name.c_str() << ") returned: " << val
                << std::endl;
  return val == 0;
}
// Size of `fname` in bytes, or 0 (with a log message) when the file
// cannot be opened.
inline _u64 get_file_size(const std::string& fname) {
  std::ifstream reader(fname, std::ios::binary | std::ios::ate);
  if (reader.fail() || !reader.is_open()) {
    diskann::cout << "Could not open file: " << fname << std::endl;
    return 0;
  }
  _u64 end_pos = reader.tellg();
  diskann::cout << " Tellg: " << reader.tellg() << " as u64: " << end_pos
                << std::endl;
  reader.close();
  return end_pos;
}
// Validate a file whose first 8 bytes record its own expected total size.
// Returns true when the actual size matches; false (with a log message)
// on open failure or mismatch. Previously an unopenable file produced a
// comparison between tellg()==-1 and an uninitialized local; now it fails
// explicitly and expected_file_size is zero-initialized in case the read
// itself fails.
inline bool validate_file_size(const std::string& name) {
  std::ifstream in(std::string(name), std::ios::binary);
  if (!in.is_open()) {
    diskann::cout << "Error loading" << name
                  << ". Could not open file for validation." << std::endl;
    return false;
  }
  in.seekg(0, in.end);
  size_t actual_file_size = in.tellg();
  in.seekg(0, in.beg);
  size_t expected_file_size = 0;
  in.read((char*) &expected_file_size, sizeof(uint64_t));
  if (actual_file_size != expected_file_size) {
    diskann::cout << "Error loading" << name
                  << ". Expected "
                     "size (metadata): "
                  << expected_file_size
                  << ", actual file size : " << actual_file_size
                  << ". Exitting." << std::endl;
    in.close();
    return false;
  }
  in.close();
  return true;
}
#ifdef _WINDOWS
#include <intrin.h>
#include <Psapi.h>
// Windows: log `message` together with the process peak working set,
// current working set, and pagefile usage (all reported in GB).
inline void printProcessMemory(const char* message) {
PROCESS_MEMORY_COUNTERS counters;
HANDLE h = GetCurrentProcess();
GetProcessMemoryInfo(h, &counters, sizeof(counters));
diskann::cout << message << " [Peaking Working Set size: "
<< counters.PeakWorkingSetSize * 1.0 / (1024 * 1024 * 1024)
<< "GB Working set size: "
<< counters.WorkingSetSize * 1.0 / (1024 * 1024 * 1024)
<< "GB Private bytes "
<< counters.PagefileUsage * 1.0 / (1024 * 1024 * 1024) << "GB]"
<< std::endl;
}
#else
// need to check and change this
// Runtime AVX2 support check (resolves the old "need to check and change
// this" TODO). On GCC/Clang x86 builds, query the CPU directly; on other
// compilers/architectures keep the previous optimistic default of true.
inline bool avx2Supported() {
#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
  return __builtin_cpu_supports("avx2");
#else
  return true;
#endif
}
// Non-Windows stub: memory counters are not queried here; just log the
// message so call sites behave uniformly across platforms.
inline void printProcessMemory(const char* message) {
diskann::cout << message << std::endl;
}
#endif
extern bool AvxSupportedCPU;
extern bool Avx2SupportedCPU;
|
nbodysystem.h | #include <cstdio>
#include <cassert>
#include <cmath>
#include <vector>
struct NbodySystem{
long nbody;
long num_step, num_bstep;
long num_step_tot, num_bstep_tot;
typedef dd_real real_type;
typedef qvec3 vect_type;
real_type eps2;
real_type tsys;
real_type init_energy, prev_energy;
real_type dtmax;
real_type eta, eta_s;
std::vector<Particle> ptcl;
std::vector<Predictor> pred;
std::vector<Force> force;
void reset_counters(){
init_energy = prev_energy;
tsys = 0.0;
num_step_tot = num_bstep_tot = 0;
for(int i=0; i<nbody; i++) ptcl[i].tlast = tsys;
puts("reset done!");
}
#if 0
void read_snapshot_masaki(const char *filename){
int nread;
FILE *fp = fopen(filename, "r");
assert(fp);
int snapid;
long nbody;
nread = fscanf(fp, "%d %ld %lf", &snapid, &nbody, &tsys);
this->nbody = nbody;
assert(3 == nread);
fprintf(stderr, "read snapshot (form M.I.), n = %ld, t = %f\n", nbody, tsys);
ptcl .resize(nbody);
pred .resize(nbody);
force.resize(nbody);
for(int i=0; i<nbody; i++){
long id;
double mass, pot;
dvec3 pos, vel;
nread = fscanf(fp, "%ld %lA %lA %lA %lA %lA %lA %lA %lA",
&id, &mass,
&pos.x, &pos.y, &pos.z,
&vel.x, &vel.y, &vel.z,
&pot);
assert(9 == nread);
ptcl[i].id = id;
ptcl[i].mass = mass;
ptcl[i].coord[0] = qvec3(pos);
ptcl[i].coord[1] = qvec3(vel);
}
fclose(fp);
const real_type eps = 4.0 / nbody;
this->eps2 = eps * eps;
num_step = num_bstep = 0;
num_step_tot = num_bstep_tot = 0;
}
#endif
real_type get_mass(const int i) const {
return pred[i].mass;
}
template <int p>
vect_type get_coord(const int i) const {
return pred[i].coord[p];
}
template <int p>
void set_force(const int i, const vect_type &f){
force[i].force[p] = f;
}
template <typename PTCL> // Particle or Predictor
real_type calc_energy(const std::vector<PTCL> &p){
real_type pe = 0.0;
real_type ke2 = 0.0;
for(int i=0; i<nbody; i++){
real_type phi = 0.0;
for(int j=i+1; j<nbody; j++){
vect_type dr = p[j].coord[0] - p[i].coord[0];
// real_type rinv = 1.0 / sqrt(eps2 + dr*dr);
real_type rinv = rsqrt(eps2 + dr*dr);
phi -= p[j].mass * rinv;
}
pe += p[i].mass * phi;
}
for(int i=0; i<nbody; i++){
ke2 += p[i].mass * p[i].coord[1].norm2();
}
return pe + 0.5 * ke2;
}
real_type calc_energy_from_ptcl(){
return calc_energy(ptcl);
}
real_type calc_energy_from_pred(){
return calc_energy(pred);
}
void calc_force_on_first_nact(int nact){
#pragma omp parallel for
for(int i=0; i<nact; i++){
calc_force_on_i <NbodySystem, real_type, vect_type>
(*this, nbody, i, this->eps2);
}
}
void init_force(){
const int niter = (Particle::NFORCE+1)/2;
for(int n=0; n<niter; n++){
for(int i=0; i<nbody; i++){
pred[i].zero_predict(ptcl[i]);
}
calc_force_on_first_nact(nbody);
for(int i=0; i<nbody; i++){
force[i].init_assign(ptcl[i]);
}
}
}
void set_fixed_dt(const real_type dt){
for(int i=0; i<nbody; i++){
ptcl[i].tlast = tsys;
ptcl[i].dt = dt;
}
}
// Extrapolate every particle to the current system time tsys.
void predict_all(){
    for(int ip=0; ip<nbody; ip++) pred[ip].predict(tsys, ptcl[ip]);
}
// Truncate every predicted coordinate component to its leading word.
// coord[j].{x,y,z} appear to be extended-precision (double-double) values;
// zeroing x.x[1] -- presumably the low/compensation word, TODO confirm
// against qvec3 -- rounds the predictor down to plain double precision,
// e.g. to emulate a lower-precision force evaluation.
void round_predictor(){
for(int i=0; i<nbody; i++){
for(int j=0; j<Particle::NFORCE; j++){
pred[i].coord[j].x.x[1] = 0.0;
pred[i].coord[j].y.x[1] = 0.0;
pred[i].coord[j].z.x[1] = 0.0;
}
}
}
void correct_and_feedback(){
for(int i=0; i<nbody; i++){
Corrector corr;
corr.correct (ptcl[i], force[i]);
corr.feedback (pred[i]);
}
}
void correct_and_commit(){
for(int i=0; i<nbody; i++){
Corrector corr;
corr.correct (ptcl[i], force[i]);
corr.interpolate(ptcl[i], force[i]);
#if 0
corr.taylor_test(ptcl[i]);
puts("");
// exit(0);
#endif
corr.commit (ptcl[i]);
}
}
// Advance the whole system by dt with a P(EC)^n scheme: predict once,
// run n-1 evaluate+correct+feedback iterations, then a final evaluate
// followed by correct-and-commit.
void integrate_pec_nth(const int n, const real_type dt){
    tsys += dt;
    predict_all();
    for(int pass=1; pass<n; pass++){
        calc_force_on_first_nact(nbody);
        correct_and_feedback();
    }
    calc_force_on_first_nact(nbody);
    correct_and_commit();
}
};
|
OMP.c | #define CHUNK 1024*1024 // Run CHUNK iterations and check error
#define LOG 1024 // Print progress each LOG iterations
#define LIMIT 1024*1024 // LIMIT of iterations
#include "../common.h"
int main(int argc, char *argv[]) {
unsigned int digits;
unsigned int threads;
double precision;
getParams(argc, argv, &threads, &digits, &precision);
double sum= 0.0, pi, error= 1.0;
omp_set_num_threads(threads);
unsigned long i = 0;
while (error > precision && i < LIMIT) {
#pragma omp parallel for reduction(+:sum)
for (unsigned long n = i * CHUNK; n < (i + 1) * CHUNK; ++n) {
if (n % 2 == 0)
sum += 1.0 / ((n << 1) + 1);
else
sum -= 1.0 / ((n << 1) + 1);
}
pi = 4.0 * sum;
error = getError(pi);
printLog(precision, pi, error, ++i);
}
return EXIT_SUCCESS;
}
|
GB_transpose.c | //------------------------------------------------------------------------------
// GB_transpose: C=A' or C=op(A'), with typecasting
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// CALLS: GB_builder
// Transpose a matrix, C=A', and optionally apply a unary operator and/or
// typecast the values. The transpose may be done in place, in which case C or
// A are modified in place. If the matrix to be transposed has more than one
// vector, it may have jumbled indices in its vectors, which must be sorted.
// If the input matrix has a single vector, it must be already sorted on input.
// The input matrix may have shallow components (even if in place), and the
// output may also have shallow components (even in the input matrix is not
// shallow).
// This function is CSR/CSC agnostic; it sets the output matrix format from
// C_is_csc but otherwise ignores the CSR/CSC type of A and C.
// If A_in is NULL, then C = (*Chandle) is transposed in place. If out of
// memory, (*Chandle) is always returned as NULL, which frees the input matrix
// C if the transpose is done in place.
// If A_in is not NULL and Chandle is NULL, then A is modified in place, and
// the A_in matrix is not freed when done.
// The bucket sort is parallel, but not highly scalable. If e=nnz(A) and A is
// m-by-n, then at most O(e/n) threads are used. For many matrices, e is O(n),
// although the constant can be high. The qsort method is more scalable, but
// not as fast with a modest number of threads.
#include "GB_transpose.h"
#include "GB_build.h"
#include "GB_apply.h"
// Free the workspace local to GB_transpose (only the Count array).
// NOTE: no comments may appear inside these macro bodies -- backslash-
// newline splicing happens before comment removal, so a // comment on a
// continued line would swallow the continuation.
#define GB_FREE_WORK \
{ \
GB_FREE (Count) ; \
} \
// free prior content of A, if transpose is done in place
#define GB_FREE_IN_PLACE_A \
{ \
if (in_place) \
{ \
/* A is being transposed in placed */ \
/* free prior content of A but not &A itself */ \
if (!Ap_shallow) GB_FREE (Ap) ; \
if (!Ah_shallow) GB_FREE (Ah) ; \
if (!Ai_shallow) GB_FREE (Ai) ; \
if (!Ax_shallow) GB_FREE (Ax) ; \
} \
else \
{ \
/* A is not modified; it is purely an input matrix */ \
; \
} \
}
// free the new C matrix, unless C=A' is being done in place of A
// (in that case Chandle aliases &A and must survive)
#define GB_FREE_C \
{ \
if (!in_place_A) \
{ \
/* free all of C and all its contents &C */ \
GB_MATRIX_FREE (Chandle) ; \
} \
}
// free both A (if in place) and C (if not in place of A)
#define GB_FREE_A_AND_C \
{ \
GB_FREE_IN_PLACE_A ; \
GB_FREE_C ; \
}
//------------------------------------------------------------------------------
// GB_transpose
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_transpose // C=A', C=(ctype)A or C=op(A')
(
GrB_Matrix *Chandle, // output matrix C, possibly modified in place
GrB_Type ctype, // desired type of C; if NULL use A->type.
// ignored if op is present (cast to op->ztype)
const bool C_is_csc, // desired CSR/CSC format of C
const GrB_Matrix A_in, // input matrix
// no operator is applied if both op1 and op2 are NULL
const GrB_UnaryOp op1_in, // unary operator to apply
const GrB_BinaryOp op2_in, // binary operator to apply
const GxB_Scalar scalar, // scalar to bind to binary operator
bool binop_bind1st, // if true, binop(x,A) else binop(A,y)
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs and determine if transpose is done in place
//--------------------------------------------------------------------------
// Three calling forms are distinguished below:
// (1) A_in == NULL: transpose (*Chandle) in place of C;
// (2) Chandle == NULL or *Chandle == A_in: transpose A_in in place of A;
// (3) otherwise: construct a brand-new C from A_in.
bool in_place_C, in_place_A ;
GrB_Matrix A, C ;
if (A_in == NULL)
{
//----------------------------------------------------------------------
// C = C' ; &C is transposed in place
//----------------------------------------------------------------------
// GB_transpose (&C, ctype, csc, NULL, op) ;
// C=A' is transposed in place, in the matrix C.
// The matrix C is freed if an error occurs and C is set to NULL.
ASSERT (Chandle != NULL) ; // at least &C or A must be non-NULL
A = (*Chandle) ;
C = A ; // C must be freed if an error occurs
in_place_C = true ; // C is modified in place
in_place_A = false ;
ASSERT (A == C && A == (*Chandle)) ;
}
else if (Chandle == NULL || (*Chandle) == A_in)
{
//----------------------------------------------------------------------
// A = A' ; A is transposed in place; reuse the header of A
//----------------------------------------------------------------------
// GB_transpose (NULL, ctype, csc, A, op) ;
// GB_transpose (&A, ctype, csc, A, op) ;
// C=A' is transposed in place, in the matrix A.
// The matrix A_in is not freed if an error occurs.
A = A_in ;
Chandle = &A ; // C must not be freed if an error occurs
C = A ;
in_place_C = false ;
in_place_A = true ; // A is modified in place
ASSERT (A == C && A == (*Chandle)) ;
}
else
{
//----------------------------------------------------------------------
// C = A' ; C and A are different
//----------------------------------------------------------------------
// GB_transpose (&C, ctype, csc, A, op) ;
// C and A are both non-NULL, and not aliased.
// C=A' where C is a new matrix constructed here.
// The matrix C is freed if an error occurs, and C is set to NULL.
A = A_in ;
C = NULL ;
(*Chandle) = NULL ; // C must be allocated; freed on error
in_place_C = false ; // C and A are different matrices
in_place_A = false ;
ASSERT (A != C && A != (*Chandle)) ;
}
// "in place" in either sense (in the header of C, or of A)
bool in_place = (in_place_A || in_place_C) ;
ASSERT_MATRIX_OK_OR_JUMBLED (A, "A input for GB_transpose", GB0) ;
ASSERT_TYPE_OK_OR_NULL (ctype, "ctype for GB_transpose", GB0) ;
ASSERT_UNARYOP_OK_OR_NULL (op1_in, "unop for GB_transpose", GB0) ;
ASSERT_BINARYOP_OK_OR_NULL (op2_in, "binop for GB_transpose", GB0) ;
ASSERT_SCALAR_OK_OR_NULL (scalar, "scalar for GB_transpose", GB0) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
//--------------------------------------------------------------------------
// determine the number of threads to use here
//--------------------------------------------------------------------------
int64_t anz = GB_NNZ (A) ;
int64_t anvec = A->nvec ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Type atype = A->type ;
size_t asize = atype->size ;
GB_Type_code acode = atype->code ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
int64_t aplen = A->plen ;
bool A_is_hyper = A->is_hyper ;
double A_hyper_ratio = A->hyper_ratio ;
int64_t anzmax = A->nzmax ;
// if in place, these must be freed when done, whether successful or not
int64_t *GB_RESTRICT Ap = A->p ;
int64_t *GB_RESTRICT Ah = A->h ;
int64_t *GB_RESTRICT Ai = A->i ;
GB_void *GB_RESTRICT Ax = (GB_void *) A->x ;
bool Ap_shallow = A->p_shallow ;
bool Ah_shallow = A->h_shallow ;
bool Ai_shallow = A->i_shallow ;
bool Ax_shallow = A->x_shallow ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
int nth = GB_nthreads (avdim, chunk, nthreads_max) ;
int ntasks = (nth == 1) ? 1 : (8 * nth) ;
ntasks = GB_IMIN (ntasks, avdim) ;
ntasks = GB_IMAX (ntasks, 1) ;
int64_t *GB_RESTRICT Count = NULL ; // size ntasks+1, if allocated
if (anz > 0 && avdim != 1 && avlen == 1)
{
// Count is only used in one case below
Count = GB_CALLOC (ntasks+1, int64_t) ;
if (Count == NULL)
{
// out of memory
GB_FREE_C ;
return (GB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// determine the type of C and get the unary or binary operator
//--------------------------------------------------------------------------
// If a unary or binary operator is present, C is always returned as
// the ztype of the operator. The input ctype is ignored.
GrB_UnaryOp op1 = NULL ;
GrB_BinaryOp op2 = NULL ;
if (op1_in != NULL)
{
// get the unary operator
if (atype == op1_in->xtype && op1_in->opcode == GB_IDENTITY_opcode)
{
// op1 is a built-in identity operator, with the same type as A, so
// do not apply the operator and do not typecast.
ctype = atype ;
}
else
{
// apply the operator, z=op1(x)
op1 = op1_in ;
ctype = op1->ztype ;
}
}
else if (op2_in != NULL)
{
// get the binary operator
GrB_Type op2_intype = binop_bind1st ? op2_in->xtype : op2_in->ytype ;
GB_Opcode opcode = op2_in->opcode ;
// only GB_apply calls GB_transpose with op2_in, and it ensures this
// condition holds: the first(A,y), second(x,A), and any(...) have
// been renamed to identity(A), so these cases do not occur here.
ASSERT (!
((opcode == GB_ANY_opcode) ||
(opcode == GB_FIRST_opcode && !binop_bind1st) ||
(opcode == GB_SECOND_opcode && binop_bind1st))) ;
// apply the operator, z=op2(A,y) or op2(x,A)
op2 = op2_in ;
ctype = op2->ztype ;
}
else
{
// no operator
if (ctype == NULL)
{
// no typecasting if ctype is NULL
ctype = atype ;
}
}
GB_Type_code ccode = ctype->code ;
size_t csize = ctype->size ;
//--------------------------------------------------------------------------
// C = A'
//--------------------------------------------------------------------------
ASSERT (GB_IMPLIES (avlen == 0 || avdim == 0, anz == 0)) ;
// new numerical values are needed unless Ax can be kept/shared as-is
bool allocate_new_Cx = (ctype != atype) || (op1 != NULL) || (op2 != NULL) ;
if (anz == 0)
{
//======================================================================
// quick return if A is empty
//======================================================================
GB_FREE_IN_PLACE_A ;
// A is empty; create a new empty matrix C, with the new type and
// dimensions. C is hypersparse for now but may convert when
// returned.
info = GB_create (Chandle, ctype, avdim, avlen, GB_Ap_calloc,
C_is_csc, GB_FORCE_HYPER, A_hyper_ratio, 1, 1, true, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
ASSERT_MATRIX_OK (*Chandle, "C transpose empty", GB0) ;
}
else if (avdim == 1)
{
//======================================================================
// transpose a "column" vector into a "row"
//======================================================================
// transpose a vector (avlen-by-1) into a "row" matrix (1-by-avlen).
// A must be already sorted on input
ASSERT_MATRIX_OK (A, "the vector A must already be sorted", GB0) ;
//----------------------------------------------------------------------
// allocate space
//----------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in place.
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
info = GB_new (Chandle, ctype, 1, avlen, GB_Ap_null, C_is_csc,
GB_FORCE_HYPER, A_hyper_ratio, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in place
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// allocate new space for the values and pattern
GB_void *GB_RESTRICT Cx = NULL ;
int64_t *GB_RESTRICT Cp = GB_MALLOC (anz+1, int64_t) ;
int64_t *GB_RESTRICT Ci = GB_CALLOC (anz , int64_t) ;
if (allocate_new_Cx)
{
// allocate new space for the new typecasted numerical values of C
Cx = GB_MALLOC (anz * ctype->size, GB_void) ;
}
if (Cp == NULL || Ci == NULL || (allocate_new_Cx && (Cx == NULL)))
{
// out of memory
GB_FREE (Cp) ;
GB_FREE (Ci) ;
GB_FREE (Cx) ;
GB_FREE_A_AND_C ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// the transpose will now succeed; fill the content of C
//----------------------------------------------------------------------
// numerical values: apply the operator, typecast, or make shallow copy
if (op1 != NULL || op2 != NULL)
{
// Cx = op ((op->xtype) Ax)
C->x = Cx ; C->x_shallow = false ;
GB_apply_op ((GB_void *) Cx,
op1, op2, scalar, binop_bind1st,
(const GB_void *) Ax, atype, anz, Context) ;
// prior Ax will be freed
}
else if (ctype != atype)
{
// copy the values from A into C and cast from atype to ctype
C->x = Cx ; C->x_shallow = false ;
GB_cast_array (Cx, ccode, Ax, acode, asize, anz, 1) ;
// prior Ax will be freed
}
else // ctype == atype
{
// no type change; numerical values of C are a shallow copy of A.
C->x = Ax ; C->x_shallow = (in_place) ? Ax_shallow : true ;
Ax = NULL ; // do not free prior Ax
}
// each entry in A becomes a non-empty vector in C
C->h = Ai ; C->h_shallow = (in_place) ? Ai_shallow : true ;
Ai = NULL ; // do not free prior Ai
C->nzmax = anz ;
// C->p = 0:anz and C->i = zeros (1,anz), newly allocated
C->plen = anz ;
C->nvec = anz ;
C->nvec_nonempty = anz ;
C->i = Ci ; C->i_shallow = false ;
C->p = Cp ; C->p_shallow = false ;
// fill the vector pointers C->p
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k <= anz ; k++)
{
Cp [k] = k ;
}
C->magic = GB_MAGIC ;
//----------------------------------------------------------------------
// free prior space
//----------------------------------------------------------------------
GB_FREE_IN_PLACE_A ;
}
else if (avlen == 1)
{
//======================================================================
// transpose a "row" into a "column" vector
//======================================================================
// transpose a "row" matrix (1-by-avdim) into a vector (avdim-by-1).
// if A->vlen is 1, all vectors of A are implicitly sorted
ASSERT_MATRIX_OK (A, "1-by-n input A already sorted", GB0) ;
//----------------------------------------------------------------------
// allocate space
//----------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is NON-hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in place.
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
info = GB_new (Chandle, ctype, avdim, 1, GB_Ap_null, C_is_csc,
GB_FORCE_NONHYPER, A_hyper_ratio, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in place
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// allocate new space for the values and pattern
GB_void *GB_RESTRICT Cx = NULL ;
int64_t *GB_RESTRICT Cp ;
int64_t *GB_RESTRICT Ci = NULL ;
Cp = GB_CALLOC (2, int64_t) ;
// if A is hypersparse, Ah already lists the non-empty columns and can
// be reused directly as C->i; otherwise Ci must be built from Ap
bool allocate_new_Ci = (!A_is_hyper) ;
if (allocate_new_Ci)
{
// A is not hypersparse, so new space is needed for Ci
Ci = GB_MALLOC (anz, int64_t) ;
}
if (allocate_new_Cx)
{
// allocate new space for the new typecasted numerical values of C
Cx = GB_MALLOC (anz * ctype->size, GB_void) ;
}
if (Cp == NULL || (allocate_new_Cx && (Cx == NULL))
|| (allocate_new_Ci && (Ci == NULL)))
{
// out of memory
GB_FREE (Cp) ;
GB_FREE (Ci) ;
GB_FREE (Cx) ;
GB_FREE_A_AND_C ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// numerical values of C: apply the op, typecast, or make shallow copy
//----------------------------------------------------------------------
if (op1 != NULL || op2 != NULL)
{
// Cx = op ((op->xtype) Ax)
C->x = Cx ; C->x_shallow = false ;
GB_apply_op ((GB_void *) Cx,
op1, op2, scalar, binop_bind1st,
(const GB_void *) Ax, atype, anz, Context) ;
// prior Ax will be freed
}
else if (ctype != atype)
{
// copy the values from A into C and cast from atype to ctype
C->x = Cx ; C->x_shallow = false ;
GB_cast_array (Cx, ccode, Ax, acode, asize, anz, 1) ;
// prior Ax will be freed
}
else // ctype == atype
{
// no type change; numerical values of C are a shallow copy of A
C->x = Ax ; C->x_shallow = (in_place) ? Ax_shallow : true ;
Ax = NULL ; // do not free prior Ax
}
//----------------------------------------------------------------------
// pattern of C
//----------------------------------------------------------------------
if (A_is_hyper)
{
//------------------------------------------------------------------
// each non-empty vector in A becomes an entry in C
//------------------------------------------------------------------
ASSERT (!allocate_new_Ci) ;
C->i = Ah ; C->i_shallow = (in_place) ? Ah_shallow : true ;
ASSERT (anvec == anz) ;
Ah = NULL ; // do not free prior Ah
}
else
{
//------------------------------------------------------------------
// find the non-empty vectors of A, which become entries in C
//------------------------------------------------------------------
ASSERT (allocate_new_Ci) ;
ASSERT (Ah == NULL) ;
if (nth == 1)
{
//--------------------------------------------------------------
// construct Ci with a single thread
//--------------------------------------------------------------
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (Ap [j] < Ap [j+1])
{
Ci [k++] = j ;
}
}
ASSERT (k == anz) ;
}
else
{
//--------------------------------------------------------------
// construct Ci in parallel: first pass counts non-empty
// vectors per task, cumsum gives each task its offset, second
// pass fills Ci
//--------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = 0 ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap [j] < Ap [j+1])
{
k++ ;
}
}
Count [tid] = k ;
}
GB_cumsum (Count, ntasks, NULL, 1) ;
ASSERT (Count [ntasks] == anz) ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = Count [tid] ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap [j] < Ap [j+1])
{
Ci [k++] = j ;
}
}
}
}
#ifdef GB_DEBUG
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (Ap [j] < Ap [j+1])
{
ASSERT (Ci [k] == j) ;
k++ ;
}
}
ASSERT (k == anz) ;
#endif
C->i = Ci ; C->i_shallow = false ;
}
//----------------------------------------------------------------------
// vector pointers of C
//----------------------------------------------------------------------
C->nzmax = anz ;
// C->p = [0 anz] and C->h = NULL
ASSERT (C->plen == 1) ;
ASSERT (C->nvec == 1) ;
ASSERT (C->h == NULL) ;
C->p = Cp ; C->p_shallow = false ;
C->nvec_nonempty = (anz == 0) ? 0 : 1 ;
// fill the vector pointers C->p
Cp [0] = 0 ;
Cp [1] = anz ;
C->magic = GB_MAGIC ;
//----------------------------------------------------------------------
// free prior space
//----------------------------------------------------------------------
GB_FREE_IN_PLACE_A ;
}
else
{
//======================================================================
// transpose a general matrix
//======================================================================
ASSERT_MATRIX_OK_OR_JUMBLED (A, "A GB_transpose jumbled ok", GB0) ;
ASSERT (avdim > 1 && avlen > 1) ;
// T=A' with optional typecasting, or T=op(A')
//----------------------------------------------------------------------
// select the method
//----------------------------------------------------------------------
// for the qsort method, if the transpose is done in place and A->i is
// not shallow, A->i can be used and then freed. Otherwise, A->i is
// not modified at all.
bool recycle_Ai = (in_place && !Ai_shallow) ;
bool use_qsort ;
if (A_is_hyper)
{
//------------------------------------------------------------------
// always use qsort for hypersparse matrices
//------------------------------------------------------------------
use_qsort = true ;
}
else
{
//------------------------------------------------------------------
// select qsort if the transpose will likely be hypersparse
//------------------------------------------------------------------
use_qsort = GB_CHOOSE_QSORT_INSTEAD_OF_BUCKET (anz, avlen) ;
}
//----------------------------------------------------------------------
// transpose the matrix with the selected method
//----------------------------------------------------------------------
if (use_qsort)
{
//==================================================================
// transpose via quicksort
//==================================================================
//------------------------------------------------------------------
// allocate and create iwork
//------------------------------------------------------------------
// allocate iwork of size anz
int64_t *iwork = GB_MALLOC (anz, int64_t) ;
if (iwork == NULL)
{
// out of memory
GB_FREE_C ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
// Construct the "row" indices of C, which are "column" indices of
// A. This array becomes the permanent T->i on output. This phase
// must be done before Chandle is created below, since that step
// destroys A.
GB_extract_vector_list (iwork, A, nthreads) ;
//------------------------------------------------------------------
// allocate the output matrix and additional space (jwork and S)
//------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in place.
// if *Chandle == NULL, allocate a new header; otherwise reuse
info = GB_new (Chandle, ctype, avdim, avlen, GB_Ap_null, C_is_csc,
GB_FORCE_HYPER, A_hyper_ratio, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in place
GB_FREE (iwork) ;
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// if in_place, the prior Ap and Ah can now be freed
if (in_place)
{
if (!Ap_shallow) GB_FREE (Ap) ;
if (!Ah_shallow) GB_FREE (Ah) ;
}
int64_t *jwork = NULL ;
GB_Type_code scode ;
GB_void *S = NULL ;
GB_void *Swork = NULL ;
bool ok = true ;
if (!recycle_Ai)
{
// allocate jwork of size anz
jwork = GB_MALLOC (anz, int64_t) ;
ok = ok && (jwork != NULL) ;
}
if (op1 != NULL || op2 != NULL)
{
// allocate Swork of size anz * csize
Swork = GB_MALLOC (anz * csize, GB_void) ;
ok = ok && (Swork != NULL) ;
}
if (!ok)
{
// out of memory
GB_FREE (iwork) ;
GB_FREE (jwork) ;
GB_FREE (Swork) ;
GB_FREE_A_AND_C ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//------------------------------------------------------------------
// construct jwork and Swork
//------------------------------------------------------------------
// "row" indices of A become "column" indices of C
if (recycle_Ai)
{
// Ai is used as workspace for the "column" indices of C.
// jwork is a shallow copy of Ai, and is freed by GB_builder.
jwork = Ai ;
ASSERT (in_place) ;
// set Ai to NULL so it is not freed by GB_FREE_IN_PLACE_A
Ai = NULL ;
}
else
{
// jwork = Ai, making a deep copy. jwork is freed by
// GB_builder. A->i is not modified, even if out of memory.
GB_memcpy (jwork, Ai, anz * sizeof (int64_t), nthreads) ;
}
// numerical values: apply the op, typecast, or make shallow copy
if (op1 != NULL || op2 != NULL)
{
// Swork = op ((op->xtype) Ax)
GB_apply_op ((GB_void *) Swork,
op1, op2, scalar, binop_bind1st,
(const GB_void *) Ax, atype, anz, Context) ;
// GB_builder will not need to typecast Swork to T->x, and it
// may choose to transplant it into T->x
scode = ccode ;
#if 0
if (in_place && !Ax_shallow)
{
// A is being transposed in place so A->x is no longer
// needed. If A->x is shallow this can be skipped. T->x
// will not be shallow if the op is present. A->x should
// be freed early to free up space for GB_builder.
// However, in the current usage, when op is used, A is not
// transposed in place, so this step is not needed.
ASSERT (GB_DEAD_CODE) ;
GB_FREE (Ax) ;
}
#endif
}
else
{
// GB_builder will typecast S from atype to ctype if needed.
// S is a shallow copy of Ax, and must not be modified.
S = Ax ;
scode = acode ;
}
//------------------------------------------------------------------
// build the matrix: T = (ctype) A' or op ((xtype) A')
//------------------------------------------------------------------
// internally, jwork is freed and then T->x is allocated, so the
// total high-water memory usage is anz * max (csize,
// sizeof(int64_t)). T is always hypersparse.
// If op is not NULL, then Swork can be transplanted into T in
// GB_builder, instead. However, this requires the tuples to be
// sorted on input, which is possible but rare for GB_transpose.
GrB_Matrix T ;
info = GB_builder
(
&T, // create T
ctype, // T is of type ctype
avdim, // T->vlen = A->vdim, always > 1
avlen, // T->vdim = A->vlen, always > 1
C_is_csc, // T has the same CSR/CSC format as C
&iwork, // iwork_handle, becomes T->i on output
&jwork, // jwork_handle, freed on output
&Swork, // Swork_handle, freed on output
false, // tuples are not sorted on input
true, // tuples have no duplicates
anz, // size of iwork, jwork, and Swork
true, // is_matrix: unused
false, // ijcheck: unused
NULL, NULL, // original I,J indices: not used here
S, // array of values of type scode, not modified
anz, // number of tuples
NULL, // no dup operator needed (input has no duplicates)
scode, // type of S or Swork
Context
) ;
// GB_builder always frees jwork, and either frees iwork or
// transplants it in to T->i and sets iwork to NULL. So iwork and
// jwork are always NULL on output. GB_builder does not modify S.
ASSERT (iwork == NULL && jwork == NULL && Swork == NULL) ;
//------------------------------------------------------------------
// free prior space and transplant T into C
//------------------------------------------------------------------
// Free the prior content of the input matrix, if done in place.
// Ap, Ah, and Ai have already been freed, but Ax has not.
GB_FREE_IN_PLACE_A ;
if (info != GrB_SUCCESS)
{
// out of memory in GB_builder
GB_FREE_A_AND_C ;
GB_FREE_WORK ;
return (info) ;
}
// Transplant T in to the result C. The matrix T is not shallow
// and no typecasting is done, so this will always succeed.
info = GB_transplant (*Chandle, ctype, &T, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
//==================================================================
// transpose via bucket sort
//==================================================================
// This method does not operate on the matrix in place, so it must
// create a temporary matrix T. Then the input matrix is freed and
// replaced with the new matrix T.
ASSERT (!A_is_hyper) ;
// T is also typecasted to ctype, if not NULL
GrB_Matrix T ;
info = GB_transpose_bucket (&T, ctype, C_is_csc, A,
op1, op2, scalar, binop_bind1st,
Context) ;
// free prior content, if C=A' is being done in place
if (in_place_A)
{
// free all content of A, but not the header, if in place of A
GB_PHIX_FREE (A) ; // transpose in-place
}
else if (in_place_C)
{
// free all of C, including the header, if done in place of C
GB_MATRIX_FREE (Chandle) ;
}
if (info != GrB_SUCCESS)
{
// out of memory in GB_transpose_bucket
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
ASSERT_MATRIX_OK (T, "T from bucket", GB0) ;
if (in_place_A)
{
// The header of A has not been freed, since it is used for the
// output. Transplant T back into A and free T. T is not
// shallow and no typecast is done so this will always succeed.
info = GB_transplant (A, ctype, &T, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
// If C=A' is done in place of C, then the header and content
// of the input C has been freed. The output T can now be
// moved to the Chandle.
ASSERT (*Chandle == NULL) ;
(*Chandle) = T ;
}
}
}
//--------------------------------------------------------------------------
// free workspace
//--------------------------------------------------------------------------
GB_FREE_WORK ;
//--------------------------------------------------------------------------
// conform the result to the desired hypersparsity of A
//--------------------------------------------------------------------------
// get the output matrix
C = (*Chandle) ;
// transplant the hyper_ratio from A to C
C->hyper_ratio = A_hyper_ratio ;
ASSERT_MATRIX_OK (C, "C to conform in GB_transpose", GB0) ;
info = GB_to_hyper_conform (C, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (info) ;
}
ASSERT_MATRIX_OK (*Chandle, "Chandle conformed in GB_transpose", GB0) ;
return (GrB_SUCCESS) ;
}
|
9644.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
/* Array initialization (matches upstream PolyBench 3mm).
 NOTE(review): C is scaled by nl and D by nk -- each uses another
 matrix's dimension as the divisor. This looks odd but is verbatim
 from the reference benchmark, so it is kept for output
 comparability with other PolyBench implementations. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nk; j++)
A[i][j] = ((DATA_TYPE) i*j) / ni;
for (i = 0; i < nk; i++)
for (j = 0; j < nj; j++)
B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
for (i = 0; i < nj; i++)
for (j = 0; j < nm; j++)
C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
for (i = 0; i < nm; i++)
for (j = 0; j < nl; j++)
D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
 Can be used also to check the correctness of the output.
 NOTE(review): the newline cadence uses (i * ni + j) -- ni rather
 than the row length nl -- exactly as in upstream PolyBench; it only
 affects line wrapping of the dump, not the values printed. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel. The whole function will be timed,
 including the call and return.
 Computes G = (A*B) * (C*D) as three matrix products:
 E := A*B, F := C*D, G := E*F.
 Parallelization notes:
 - j and k must be private: they are assigned inside each thread's
 iterations of the shared worksharing loops.
 - each "omp for" ends with an implicit barrier, so E and F are
 fully computed before any thread starts the G loop -- the
 ordering the third product depends on. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j, k;
#pragma scop
#pragma omp parallel private (j, k) num_threads(2)
{
/* E := A*B */
#pragma omp for schedule(static, 8)
for (i = 0; i < _PB_NI; i++)
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
E[i][j] += A[i][k] * B[k][j];
}
/* F := C*D */
#pragma omp for schedule(static, 8)
for (i = 0; i < _PB_NJ; i++)
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
F[i][j] += C[i][k] * D[k][j];
}
/* G := E*F */
#pragma omp for schedule(static, 8)
for (i = 0; i < _PB_NI; i++)
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
G[i][j] += E[i][k] * F[k][j];
}
}
#pragma endscop
}
/* Program entry: allocate the seven matrices, initialize the inputs,
   time kernel_3mm(), and print the live-out matrix G so dead-code
   elimination cannot remove the computation. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
/* Initialize input array(s); E, F, G are outputs of the kernel. */
init_array (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel (call and return are included in the timing). */
kernel_3mm (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(F),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
   by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(G);
return 0;
}
|
simple_particle_filter.h | // -*- mode: c++ -*-
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2015, JSK Lab
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the JSK Lab nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#ifndef JSK_PCL_ROS_ROS_COLLABORATIVE_PARTICLE_FILTER_H_
#define JSK_PCL_ROS_ROS_COLLABORATIVE_PARTICLE_FILTER_H_
#include <pcl/tracking/particle_filter.h>
#include <pcl/tracking/impl/particle_filter.hpp>
namespace pcl
{
namespace tracking
{
// Particle filter class which is friendly collaborative with ROS programs.
// Default behavior is same to the ParticleFilterTracker but you can customize
// the behavior from outer of the class.
// A ParticleFilterTracker whose sampling and likelihood steps are
// delegated to user-supplied callbacks so ROS nodes can drive the
// filter from outside the class.  Particles can also be injected
// directly via setParticles().  Otherwise behaves like the base
// ParticleFilterTracker.
template <typename PointInT, typename StateT>
class ROSCollaborativeParticleFilterTracker: public ParticleFilterTracker<PointInT, StateT>
{
public:
  using Tracker<PointInT, StateT>::input_;
  using ParticleFilterTracker<PointInT, StateT>::particles_;
  using ParticleFilterTracker<PointInT, StateT>::changed_;
  typedef boost::shared_ptr<ROSCollaborativeParticleFilterTracker> Ptr;
  typedef typename Tracker<PointInT, StateT>::PointCloudIn PointCloudIn;
  typedef typename PointCloudIn::Ptr PointCloudInPtr;
  typedef typename PointCloudIn::ConstPtr PointCloudInConstPtr;
  typedef typename Tracker<PointInT, StateT>::PointCloudState PointCloudState;
  typedef typename PointCloudState::Ptr PointCloudStatePtr;
  typedef typename PointCloudState::ConstPtr PointCloudStateConstPtr;
  typedef PointCoherence<PointInT> Coherence;
  typedef boost::shared_ptr< Coherence > CoherencePtr;
  typedef boost::shared_ptr< const Coherence > CoherenceConstPtr;
  typedef PointCloudCoherence<PointInT> CloudCoherence;
  typedef boost::shared_ptr< CloudCoherence > CloudCoherencePtr;
  typedef boost::shared_ptr< const CloudCoherence > CloudCoherenceConstPtr;
  // Callback producing a new (noised) sample from a given state.
  typedef boost::function<StateT (const StateT&)> CustomSampleFunc;
  // Callback writing a likelihood/weight into the given particle.
  typedef boost::function<void (PointCloudInConstPtr, StateT&)> CustomLikelihoodFunc;

  ROSCollaborativeParticleFilterTracker():
    ParticleFilterTracker<PointInT, StateT>()
  {
    motion_ratio_ = 0.0;
    changed_ = true;
  }

  // Replace the particle set wholesale (e.g. with particles coming
  // from another ROS node).
  void setParticles(PointCloudStatePtr particles)
  {
    particles_ = particles;
  }

  void setCustomSampleFunc(CustomSampleFunc f)
  {
    custom_sample_func_ = f;
  }

  void setLikelihoodFunc(CustomLikelihoodFunc f)
  {
    custom_likelihood_func_ = f;
  }

protected:
  // Skip the base-class precondition checks; the caller is trusted to
  // have supplied the input cloud and the particles beforehand.
  bool initCompute()
  {
    // Do nothing
    return true;
  }

  // Evaluate the custom likelihood for every particle, then normalize
  // the weights so they sum to one.
  void weight()
  {
    if (!particles_) {
      std::cerr << "no particles" << std::endl;
      return;  // FIX: previously fell through and dereferenced a null cloud
    }
    if (!input_) {
      std::cerr << "no input pointcloud" << std::endl;
      return;  // FIX: the likelihood callback needs an input cloud
    }
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (size_t i = 0; i < particles_->points.size (); i++) {
      custom_likelihood_func_ (input_, particles_->points[i]);
    }
    normalizeWeight();
  }

  // Draw a new generation of particles with replacement via the alias
  // method.  A motion_ratio_ fraction of them additionally gets the
  // constant motion_ offset applied.
  void resample()
  {
    std::vector<int> a (particles_->points.size ());
    std::vector<double> q (particles_->points.size ());
    this->genAliasTable (a, q, particles_);
    // memoize the original list of particles
    PointCloudStatePtr origparticles = particles_;
    particles_.reset(new PointCloudState());
    particles_->points.reserve(origparticles->points.size() + 1);
    // FIX: the motion-particle count must be derived from the *original*
    // set (particles_ was just reset to an empty cloud, so its size is
    // 0 here) and the ratio must be applied before truncating to int;
    // the old code cast motion_ratio_ (typically < 1.0) to int first,
    // which always produced zero motion particles.
    int motion_num
      = static_cast<int> (origparticles->points.size () * motion_ratio_);
    // with motion
    for ( int i = 0; i < motion_num; i++ ) { // FIX: started at 1, losing one particle
      int target_particle_index = sampleWithReplacement (a, q);
      StateT p = origparticles->points[target_particle_index];
      p = custom_sample_func_(p);
      p = p + motion_;
      particles_->points.push_back (p);
    }
    // no motion
    for ( int i = motion_num; i < particle_num_; i++ ) {
      int target_particle_index = sampleWithReplacement (a, q);
      StateT p = origparticles->points[target_particle_index];
      // add noise using gaussian
      p = custom_sample_func_(p);
      particles_->points.push_back (p);
    }
  }

  // Particles are provided externally; never scatter an initial cloud.
  bool initParticles(bool)
  {
    // Do nothing
    return true;
  }

  // One tracking step: iteration_num_ rounds of resample/weight/update.
  void computeTracking()
  {
    for (int i = 0; i < iteration_num_; i++) {
      resample();
      weight();
      update();
    }
  }

  // Scale weights to sum to 1; fall back to uniform weights when the
  // total is zero (e.g. every likelihood vanished).
  void normalizeWeight()
  {
    double n = 0.0;
    for (size_t i = 0; i < particles_->points.size(); i++) {
      n = particles_->points[i].weight + n;
    }
    if (n != 0.0) {
      for (size_t i = 0; i < particles_->points.size(); i++) {
        particles_->points[i].weight = particles_->points[i].weight / n;
      }
    }
    else {
      for (size_t i = 0; i < particles_->points.size(); i++) {
        particles_->points[i].weight = 1.0 / particles_->points.size();
      }
    }
  }

  using ParticleFilterTracker<PointInT, StateT>::iteration_num_;
  using ParticleFilterTracker<PointInT, StateT>::update;
  using ParticleFilterTracker<PointInT, StateT>::representative_state_;
  using ParticleFilterTracker<PointInT, StateT>::motion_ratio_;
  using ParticleFilterTracker<PointInT, StateT>::particle_num_;
  using ParticleFilterTracker<PointInT, StateT>::motion_;
  using ParticleFilterTracker<PointInT, StateT>::sampleWithReplacement;

  CustomSampleFunc custom_sample_func_;
  CustomLikelihoodFunc custom_likelihood_func_;
private:
};
}
}
#endif
|
opencl_office2013_fmt_plug.c | /* MS Office 2013 cracker patch for JtR. Hacked together during March of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>
*
* This OpenCL format by magnum.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and Copyright (c) 2012, magnum and it is hereby released to the general public
* under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_office2013;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_office2013);
#else
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "aes.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "unicode.h"
#include "common-opencl.h"
#include "office_common.h"
#include "config.h"
#include "sha2.h"
/* Format geometry shared with the OpenCL kernels. */
#define PLAINTEXT_LENGTH 47
#define UNICODE_LENGTH 96 /* In octets, including 0x80 */
#define FORMAT_LABEL "office2013-opencl"
#define FORMAT_NAME "MS Office 2013"
#define OCL_ALGORITHM_NAME "SHA512 OpenCL"
#define CPU_ALGORITHM_NAME " AES"
#define ALGORITHM_NAME OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME
#define BENCHMARK_COMMENT " (100,000 iterations)"
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_LENGTH 16
#define SALT_SIZE sizeof(*cur_salt)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors ($office$*2013*... hash, plaintext). */
static struct fmt_tests tests[] = {
/* 2013-openwall.pptx */
{"$office$*2013*100000*256*16*9b12805dd6d56f46d07315153f3ecb9c*c5a4a167b51faa6629f6a4caf0b4baa8*87397e0659b2a6fff90291f8e6d6d0018b750b792fefed77001edbafba7769cd", "openwall"},
/* 365-2013-openwall.docx */
{"$office$*2013*100000*256*16*774a174239a7495a59cac39a122d991c*b2f9197840f9e5d013f95a3797708e83*ecfc6d24808691aac0daeaeba72aba314d72c6bbd12f7ff0ea1a33770187caef", "openwall"},
/* 365-2013-password.docx */
{"$office$*2013*100000*256*16*d4fc9302eedabf9872b24ca700a5258b*7c9554d582520747ec3e872f109a7026*1af5b5024f00e35eaf5fd8148b410b57e7451a32898acaf14275a8c119c3a4fd", "password"},
/* 365-2013-password.xlsx */
{"$office$*2013*100000*256*16*59b49c64c0d29de733f0025837327d50*70acc7946646ea300fc13cfe3bd751e2*627c8bdb7d9846228aaea81eeed434d022bb93bb5f4da146cb3ad9d847de9ec9", "password"},
/* 365-2013-strict-password.docx */
{"$office$*2013*100000*256*16*f1c23049d85876e6b20e95ab86a477f1*13303dbd27a38ea86ef11f1b2bc56225*9a69596de0655a6c6a5b2dc4b24d6e713e307fb70af2d6b67b566173e89f941d", "password"},
{NULL}
};
/* Host-side state; buffers below are allocated/mapped in create_clobj(). */
static ms_office_custom_salt *cur_salt;
static int *cracked, any_cracked;
static char *saved_key; /* Password encoded in UCS-2 */
static int *saved_len; /* UCS-2 password length, in octets */
static char *saved_salt;
static unsigned char *key; /* Output key from kernel */
static int new_keys, spincount;
/* Device buffers and their pinned host counterparts. */
static cl_mem cl_saved_key, cl_saved_len, cl_salt, cl_pwhash, cl_key, cl_spincount;
static cl_mem pinned_saved_key, pinned_saved_len, pinned_salt, pinned_key;
static cl_kernel GenerateSHA512pwhash, Generate2013key;
static struct fmt_main *self;
#define HASH_LOOPS 100 /* Lower figure gives less X hogging */
#define ITERATIONS 100000
#define STEP 0
#define SEED 128
/* Labels for the auto-tuner's per-event timing output. */
static const char * warn[] = {
"xfer: ", ", xfer: ", ", init: ", ", loop: ", ", final: ", ", xfer: "
};
static int split_events[] = { 3, -1, -1 };
//This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* ------- Helper functions ------- */
/* Upper bound for the local work size: the tightest limit across the
 * three kernels used by this format. */
static size_t get_task_max_work_group_size()
{
	size_t limit = autotune_get_task_max_work_group_size(FALSE, 0, GenerateSHA512pwhash);
	size_t loop_limit = autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
	size_t final_limit = autotune_get_task_max_work_group_size(FALSE, 0, Generate2013key);

	if (loop_limit < limit)
		limit = loop_limit;
	if (final_limit < limit)
		limit = final_limit;
	return limit;
}
/* Allocate all device buffers plus pinned (page-locked) host mirrors
 * for 'gws' work-items, map the pinned buffers, and bind the static
 * kernel arguments.  Per-call data is uploaded later in crypt_all(). */
static void create_clobj(size_t gws, struct fmt_main *self)
{
int i;
int bench_len = strlen(tests[0].plaintext) * 2;
gws *= ocl_v_width;
/* Candidate passwords in UCS-2, one UNICODE_LENGTH slot per work-item. */
pinned_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, UNICODE_LENGTH * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
cl_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, UNICODE_LENGTH * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating device memory");
saved_key = (char*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, UNICODE_LENGTH * gws, 0, NULL, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_key");
memset(saved_key, 0, UNICODE_LENGTH * gws);
/* Per-candidate UCS-2 length, in octets. */
pinned_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, sizeof(cl_int) * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
cl_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(cl_int) * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating device memory");
saved_len = (int*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_len, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, sizeof(cl_int) * gws, 0, NULL, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_len");
/* Pre-fill lengths so the auto-tune benchmark has valid input. */
for (i = 0; i < gws; i++)
saved_len[i] = bench_len;
pinned_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, SALT_LENGTH, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
cl_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, SALT_LENGTH, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating device memory");
saved_salt = (char*) clEnqueueMapBuffer(queue[gpu_id], pinned_salt, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, SALT_LENGTH, 0, NULL, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_salt");
memset(saved_salt, 0, SALT_LENGTH);
/* Intermediate hash state: 9 ulongs per work-item, consumed and
   updated by the loop kernel. */
cl_pwhash = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_ulong) * 9 * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating device state buffer");
/* Final derived key material, 128 bytes per work-item. */
pinned_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, 128 * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
cl_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, 128 * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating device memory");
key = (unsigned char*) clEnqueueMapBuffer(queue[gpu_id], pinned_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, 128 * gws, 0, NULL, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error mapping page-locked memory verifier keys");
memset(key, 0, 128 * gws);
cl_spincount = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, sizeof(cl_int), &spincount, &ret_code);
HANDLE_CLERROR(ret_code, "Error mapping spincount");
/* Static kernel arguments; these buffers never change identity. */
HANDLE_CLERROR(clSetKernelArg(GenerateSHA512pwhash, 0, sizeof(cl_mem), (void*)&cl_saved_key), "Error setting argument 0");
HANDLE_CLERROR(clSetKernelArg(GenerateSHA512pwhash, 1, sizeof(cl_mem), (void*)&cl_saved_len), "Error setting argument 1");
HANDLE_CLERROR(clSetKernelArg(GenerateSHA512pwhash, 2, sizeof(cl_mem), (void*)&cl_salt), "Error setting argument 2");
HANDLE_CLERROR(clSetKernelArg(GenerateSHA512pwhash, 3, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 3");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0");
HANDLE_CLERROR(clSetKernelArg(Generate2013key, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0");
HANDLE_CLERROR(clSetKernelArg(Generate2013key, 1, sizeof(cl_mem), (void*)&cl_key), "Error setting argument 1");
HANDLE_CLERROR(clSetKernelArg(Generate2013key, 2, sizeof(cl_mem), (void*)&cl_spincount), "Error setting argument 2");
cracked = mem_alloc(sizeof(*cracked) * gws);
}
/* Undo create_clobj(): unmap the pinned host buffers, release every
 * device object and free the host result array.  Guarded by 'cracked'
 * (presumably MEM_FREE NULLs it, making a second call a no-op --
 * verify against misc.h). */
static void release_clobj(void)
{
if (cracked) {
HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_key, key, 0, NULL, NULL), "Error Unmapping key");
HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_key, saved_key, 0, NULL, NULL), "Error Unmapping saved_key");
HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_len, saved_len, 0, NULL, NULL), "Error Unmapping saved_len");
HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_salt, saved_salt, 0, NULL, NULL), "Error Unmapping saved_salt");
/* Make sure the unmaps completed before releasing the objects. */
HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error releasing memory mappings");
HANDLE_CLERROR(clReleaseMemObject(cl_spincount), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(pinned_key), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(pinned_saved_key), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(pinned_saved_len), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(pinned_salt), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(cl_key), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(cl_saved_key), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(cl_saved_len), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(cl_salt), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(cl_pwhash), "Release GPU buffer");
MEM_FREE(cracked);
}
}
/* Tear down buffers, kernels and the program built by reset().  Safe
 * to call when auto-tune never ran. */
static void done(void)
{
	if (!autotuned)
		return;

	release_clobj();
	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(GenerateSHA512pwhash), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(Generate2013key), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
	autotuned--;
}
/* Zero the host-side key and length buffers for all work-items. */
static void clear_keys(void)
{
	size_t n = global_work_size * ocl_v_width;

	memset(saved_len, 0, sizeof(*saved_len) * n);
	memset(saved_key, 0, UNICODE_LENGTH * n);
}
/* Store one candidate password, converted to UCS-2 (UTF-16), into the
 * host buffer slot for 'index'.  Appends the 0x80 marker and doubles
 * the stored length to octets -- the layout the SHA-512 kernel expects. */
static void set_key(char *key, int index)
{
UTF16 *utfkey = (UTF16*)&saved_key[index * UNICODE_LENGTH];
/* convert key to UTF-16LE */
saved_len[index] = enc_to_utf16(utfkey, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
if (saved_len[index] < 0)
saved_len[index] = strlen16(utfkey); /* negative result: truncated; keep what fit */
/* Prepare for GPU */
utfkey[saved_len[index]] = 0x80;
saved_len[index] <<= 1;
new_keys = 1; /* triggers a re-upload in crypt_all() */
}
/* Select the active salt: cache the pointer, copy the 16-byte osalt
 * into the mapped host buffer and push salt + spincount to the device.
 * Writes are non-blocking; presumably ordered before later kernel
 * launches on the same (in-order) queue -- standard OpenCL assumption. */
static void set_salt(void *salt)
{
cur_salt = (ms_office_custom_salt *)salt;
memcpy(saved_salt, cur_salt->osalt, SALT_LENGTH);
spincount = cur_salt->spinCount;
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_salt, CL_FALSE, 0, SALT_LENGTH, saved_salt, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_salt");
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_spincount, CL_FALSE, 0, 4, &spincount, 0, NULL, NULL), "failed in clEnqueueWriteBuffer spincount");
}
/* One-time format init: probe the device's preferred vector width and,
 * when vectorizing, advertise it in the algorithm name.  Also raises
 * the max plaintext length for UTF-8 input. */
static void init(struct fmt_main *_self)
{
static char valgo[32] = "";
self = _self;
opencl_prepare_dev(gpu_id);
/* VLIW5 can't take the register pressure from vectorizing this.
Well it can, and it does get faster but only at a GWS that will
give a total time for crypt_all() of > 30 seconds. */
if (options.v_width || !amd_vliw5(device_info[gpu_id]))
if ((ocl_v_width = opencl_get_vector_width(gpu_id,
sizeof(cl_long))) > 1) {
/* Run vectorized kernel */
snprintf(valgo, sizeof(valgo),
OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, ocl_v_width);
self->params.algorithm_name = valgo;
}
/* UTF-8 may need up to 3 octets per UCS-2 code unit. */
if (options.target_enc == UTF_8)
self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
}
/* Build the OpenCL program and kernels on first call, then run the
 * auto-tuner to pick global/local work sizes. */
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
snprintf(build_opts, sizeof(build_opts),
"-DHASH_LOOPS=%u -DUNICODE_LENGTH=%u -DV_WIDTH=%u",
HASH_LOOPS,
UNICODE_LENGTH,
ocl_v_width);
opencl_init("$JOHN/kernels/office2013_kernel.cl", gpu_id,
build_opts);
// Create kernels to execute
GenerateSHA512pwhash = clCreateKernel(program[gpu_id], "GenerateSHA512pwhash", &ret_code);
HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
crypt_kernel = clCreateKernel(program[gpu_id], "HashLoop", &ret_code);
HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
Generate2013key = clCreateKernel(program[gpu_id], "Generate2013key", &ret_code);
HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
//Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, HASH_LOOPS, split_events, warn,
3, self, create_clobj, release_clobj,
ocl_v_width * UNICODE_LENGTH, 0, db);
//Auto tune execution from shared/included code.
autotune_run(self, ITERATIONS + 4, 0,
(cpu(device_info[gpu_id]) ?
1000000000 : 10000000000ULL));
}
}
/* Hash 'count' candidates: one SHA-512 per UCS-2 password+salt, then
 * 'spincount' chained iterations on the GPU in HASH_LOOPS-sized
 * slices, then the 2013 key derivation.  The AES verifier check runs
 * on the CPU.  Matches are flagged in cracked[]/any_cracked; always
 * returns count. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
size_t gws, scalar_gws;
size_t *lws = local_work_size ? &local_work_size : NULL;
gws = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size);
scalar_gws = gws * ocl_v_width;
/* Clear result flags left over from the previous batch. */
if (any_cracked) {
memset(cracked, 0, count * sizeof(*cracked));
any_cracked = 0;
}
/* Re-upload keys only when set_key() ran since the last call. */
if (ocl_autotune_running || new_keys) {
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * scalar_gws, saved_key, 0, NULL, multi_profilingEvent[0]), "failed in clEnqueueWriteBuffer saved_key");
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * scalar_gws, saved_len, 0, NULL, multi_profilingEvent[1]), "failed in clEnqueueWriteBuffer saved_len");
new_keys = 0;
}
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], GenerateSHA512pwhash, 1, NULL, &scalar_gws, lws, 0, NULL, multi_profilingEvent[2]), "failed in clEnqueueNDRangeKernel");
/* Run the iteration loop in slices so the GPU/display stays responsive. */
for (index = 0; index < (ocl_autotune_running ? 1 : spincount / HASH_LOOPS); index++) {
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[3]), "failed in clEnqueueNDRangeKernel");
BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
opencl_process_event();
}
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], Generate2013key, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[4]), "failed in clEnqueueNDRangeKernel");
// read back verifier keys
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_key, CL_TRUE, 0, 128 * scalar_gws, key, 0, NULL, multi_profilingEvent[5]), "failed in reading key back");
if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
SHA512_CTX ctx;
unsigned char decryptedVerifierHashInputBytes[16];
unsigned char decryptedVerifierHashBytes[32];
unsigned char hash[64];
/* Decrypt verifier input with key[0..63] and the stored verifier
   hash with key[64..127], then compare SHA-512(input) against it. */
ms_office_common_DecryptUsingSymmetricKeyAlgorithm(cur_salt, &key[128*index], cur_salt->encryptedVerifier, decryptedVerifierHashInputBytes, 16);
ms_office_common_DecryptUsingSymmetricKeyAlgorithm(cur_salt, &key[128*index+64], cur_salt->encryptedVerifierHash, decryptedVerifierHashBytes, 32);
SHA512_Init(&ctx);
SHA512_Update(&ctx, decryptedVerifierHashInputBytes, 16);
SHA512_Final(hash, &ctx);
/* Only the leading 20 bytes are compared here. */
if (!memcmp(hash, decryptedVerifierHashBytes, 20))
{
cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
}
return count;
}
/* Batch-level check: nonzero iff crypt_all() flagged any index. */
static int cmp_all(void *binary, int count)
{
	return any_cracked != 0;
}
/* Per-index check against the flags computed in crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index] != 0;
}
/* Full verification already happened in crypt_all(); nothing left. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Return the stored candidate at 'index', converted back from UCS-2
 * to the configured target encoding.  saved_len is in octets, hence
 * the >> 1 when placing the UTF-16 terminator (see set_key). */
static char *get_key(int index)
{
UTF16 buf[PLAINTEXT_LENGTH + 1];
memcpy(buf, &saved_key[index * UNICODE_LENGTH], saved_len[index]);
buf[saved_len[index] >> 1] = 0;
return (char*)utf16_to_enc(buf);
}
/* Format registration: glue between the JtR core and the callbacks
 * above.  BINARY_SIZE is 0 because verification happens entirely in
 * crypt_all(); the cmp_*() hooks just report the cracked[] flags. */
struct fmt_main fmt_opencl_office2013 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP,
{
"iteration count",
},
{ FORMAT_TAG_OFFICE_2013 },
tests
}, {
init,
done,
reset,
fmt_default_prepare,
ms_office_common_valid_2013,
fmt_default_split,
fmt_default_binary,
ms_office_common_get_salt,
{
ms_office_common_iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
/// Returns the AST node bound to \c ID.
///
/// Returns NULL if there was no node bound to \c ID or if there is a node but
/// it cannot be converted to the specified type.
template <typename T>
const T *getNodeAs(StringRef ID) const {
return MyBoundNodes.getNodeAs<T>(ID);
}
/// Type of mapping from binding identifiers to bound nodes. This type
/// is an associative container with a key type of \c std::string and a value
/// type of \c clang::ast_type_traits::DynTypedNode
using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;
/// Retrieve mapping from binding identifiers to bound nodes.
const IDToNodeMap &getMap() const {
return MyBoundNodes.getMap();
}
private:
/// Only BoundNodesTreeBuilder (a friend) may construct instances.
friend class internal::BoundNodesTreeBuilder;
/// Create BoundNodes from a pre-filled map of bindings.
BoundNodes(internal::BoundNodesMap &MyBoundNodes)
: MyBoundNodes(MyBoundNodes) {}
/// Storage for the id -> node bindings.
internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
/// Note: matches \c QualType, i.e. a type including its qualifiers.
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() { return internal::TrueMatcher(); }
// The variadic matcher objects below are declared 'extern' here and
// defined out of line, so all translation units share one instance.
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile()))
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Map the node's begin location to its macro-expansion location, then ask
  // the SourceManager whether that location is in the main file. (Local is
  // named SM to avoid shadowing the clang::SourceManager type name.)
  auto &SM = Finder->getASTContext().getSourceManager();
  return SM.isInMainFile(SM.getExpansionLoc(Node.getBeginLoc()));
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader()))
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  auto &SM = Finder->getASTContext().getSourceManager();
  SourceLocation Loc = SM.getExpansionLoc(Node.getBeginLoc());
  // Nodes without a valid expansion location can never be in a system header.
  return Loc.isValid() && SM.isInSystemHeader(Loc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
                          std::string, RegExp) {
  auto &SM = Finder->getASTContext().getSourceManager();
  SourceLocation ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  if (ExpansionLoc.isInvalid())
    return false;
  // Resolve the expansion location to the file it belongs to; nodes without
  // a backing file entry (e.g. built-ins) never match.
  const auto *File = SM.getFileEntryForID(SM.getFileID(ExpansionLoc));
  if (!File)
    return false;
  // NOTE: the regex is recompiled on every candidate node; this matches the
  // original behavior.
  llvm::Regex RE(RegExp);
  return RE.match(File->getName());
}
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
/// if different parts of the statement are expanded from different
/// appearances of the macro.
///
/// FIXME: Change to be a polymorphic matcher that works on any syntactic
/// node. There's nothing `Stmt`-specific about it.
AST_MATCHER_P(clang::Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) {
  // The statement matches only when both its begin and end locations come
  // from the same expansion of the named macro. The lookups are side-effect
  // free, so both can be computed before comparing.
  auto &Ctx = Finder->getASTContext();
  llvm::Optional<SourceLocation> BeginLoc =
      internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Ctx);
  llvm::Optional<SourceLocation> EndLoc =
      internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Ctx);
  return BeginLoc && EndLoc && *BeginLoc == *EndLoc;
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches public C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPublic())
/// matches 'int a;'
AST_MATCHER(Decl, isPublic) {
  // Access as recorded on the declaration (set during semantic analysis).
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_public;
}
/// Matches protected C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isProtected())
/// matches 'int b;'
AST_MATCHER(Decl, isProtected) {
  // Access as recorded on the declaration (set during semantic analysis).
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_protected;
}
/// Matches private C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPrivate())
/// matches 'int c;'
AST_MATCHER(Decl, isPrivate) {
  // Access as recorded on the declaration (set during semantic analysis).
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
  // Pure delegation to FieldDecl::isBitField(); no width constraint here
  // (use hasBitWidth for that).
  return Node.isBitField();
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // Members that are not bit-fields never match, regardless of Width.
  if (!Node.isBitField())
    return false;
  // getBitWidthValue evaluates the bit-width expression in the ASTContext.
  return Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  // Run the inner matcher on the member's in-class initializer, if present;
  // members without one never match.
  if (const Expr *Init = Node.getInClassInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
AST_MATCHER(FunctionDecl, isMain) {
  // Pure delegation to FunctionDecl::isMain().
  return Node.isMain();
}
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; #1
/// template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  // Match against the primary template this declaration specializes, if any.
  if (const ClassTemplateDecl *Primary = Node.getSpecializedTemplate())
    return InnerMatcher.matches(*Primary, Finder, Builder);
  return false;
}
/// Matches a declaration that has been implicitly added
/// by the compiler (eg. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) {
  // Pure delegation to Decl::isImplicit().
  return Node.isImplicit();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
/// template<typename T> void f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Succeed if any template argument of the specialization satisfies the
  // inner matcher; matchesFirstInRange records bindings of the first hit.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, Args.begin(), Args.end(), Finder,
                             Builder);
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
/// void foo()
/// {
/// int i = 3.0;
/// }
/// \endcode
/// The matcher
/// \code
/// traverse(ast_type_traits::TK_IgnoreImplicitCastsAndParentheses,
/// varDecl(hasInitializer(floatLiteral().bind("init")))
/// )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
template <typename T>
internal::Matcher<T> traverse(ast_type_traits::TraversalKind TK,
                              const internal::Matcher<T> &InnerMatcher) {
  // Wrap InnerMatcher in a TraversalMatcher so every nested matcher runs
  // with traversal kind TK, restrict the wrapper to InnerMatcher's node
  // kind, and convert the result back to a typed Matcher<T>.
  return internal::DynTypedMatcher::constructRestrictedWrapper(
             new internal::TraversalMatcher<T>(TK, InnerMatcher),
             InnerMatcher.getID().first)
      .template unconditionalConvertTo<T>();
}
/// \c traverse overload for \c BindableMatcher: same wrapping as the
/// \c Matcher<T> overload, but re-wraps the result so it stays bindable.
template <typename T>
internal::BindableMatcher<T>
traverse(ast_type_traits::TraversalKind TK,
         const internal::BindableMatcher<T> &InnerMatcher) {
  return internal::BindableMatcher<T>(
      internal::DynTypedMatcher::constructRestrictedWrapper(
          new internal::TraversalMatcher<T>(TK, InnerMatcher),
          InnerMatcher.getID().first)
          .template unconditionalConvertTo<T>());
}
/// \c traverse overload for variadic-operator matchers (e.g. \c allOf):
/// defers the actual wrapping via TraversalWrapper until the operator is
/// converted to a concrete matcher.
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(ast_type_traits::TraversalKind TK,
         const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
  return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
      TK, InnerMatcher);
}
/// \c traverse overload for argument-adapting matcher functions (e.g. the
/// adaptor behind \c has / \c hasDescendant): defers wrapping via
/// TraversalWrapper until the adaptor is applied.
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
          typename T, typename ToTypes>
internal::TraversalWrapper<
    internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(ast_type_traits::TraversalKind TK,
         const internal::ArgumentAdaptingMatcherFuncAdaptor<
             ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
                                                   ToTypes>>(TK, InnerMatcher);
}
/// \c traverse overload for one-parameter polymorphic matchers: defers
/// wrapping via TraversalWrapper.
template <template <typename T, typename P1> class MatcherT, typename P1,
          typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>
traverse(
    ast_type_traits::TraversalKind TK,
    const internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>
        &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>(
      TK, InnerMatcher);
}
/// \c traverse overload for two-parameter polymorphic matchers: defers
/// wrapping via TraversalWrapper.
template <template <typename T, typename P1, typename P2> class MatcherT,
          typename P1, typename P2, typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>
traverse(
    ast_type_traits::TraversalKind TK,
    const internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>
        &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>(
      TK, InnerMatcher);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) {
  // Strip implicit nodes, then hand the remaining expression to the
  // inner matcher.
  const Expr *Stripped = Node.IgnoreImplicit();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) {
  // Strip implicit casts only, then hand the remaining expression to the
  // inner matcher.
  const Expr *Stripped = Node.IgnoreImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and casts (implicit and explicit) before matching.
  const Expr *Stripped = Node.IgnoreParenCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip parentheses and implicit casts (but not explicit casts) before
  // matching.
  const Expr *Stripped = Node.IgnoreParenImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  // Strip ParenType sugar from the type before matching.
  const QualType Unparenthesized = Node.IgnoreParens();
  return InnerMatcher.matches(Unparenthesized, Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  // Strip parentheses only (casts are preserved) before matching.
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
/// void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  // Pure delegation to Expr::isInstantiationDependent().
  return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) {
  // Pure delegation to Expr::isTypeDependent().
  return Node.isTypeDependent();
}
/// Matches expression that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) {
  // Pure delegation to Expr::isValueDependent().
  return Node.isValueDependent();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  // An out-of-range index never matches; otherwise match the N'th argument.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return N < Args.size() && InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  // Compare the specialization's argument count against the expected N.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return Args.size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type template arguments can match; && short-circuits so getAsType
  // is called only on Type-kind arguments.
  return Node.getKind() == TemplateArgument::Type &&
         InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  // Only template template arguments can match; && short-circuits so
  // getAsTemplate is called only on Template-kind arguments.
  return Node.getKind() == TemplateArgument::Template &&
         InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  // Non-declaration arguments never match.
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  // Non-expression arguments never match.
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
  // True iff this template argument holds an integral value.
  const auto Kind = Node.getKind();
  return Kind == TemplateArgument::Integral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only integral template arguments carry an integral type to match
  // against; && short-circuits for other argument kinds.
  return Node.getKind() == TemplateArgument::Integral &&
         InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  // Non-integral arguments never match.
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  // Compare against the canonical base-10 rendering of the argument value.
  const std::string Rendered = Node.getAsIntegral().toString(10);
  return Rendered == Value;
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches tag declarations.
///
/// Example matches X, Z, U, S, E
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// enum E {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int) };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
/// }
/// \endcode
AST_MATCHER(CallExpr, usesADL) {
  // True iff the callee of this call was found via argument-dependent lookup.
  return Node.usesADL();
}
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if the expression has one).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // Only semantically-analyzed init lists carry a separate syntactic form;
  // getSyntacticForm() returns null otherwise.
  if (const Expr *Syntactic = Node.getSyntacticForm())
    return InnerMatcher.matches(*Syntactic, Finder, Builder);
  return false;
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be met in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches noexcept expressions.
///
/// Given
/// \code
/// bool a() noexcept;
/// bool b() noexcept(true);
/// bool c() noexcept(false);
/// bool d() noexcept(noexcept(a()));
/// bool e = noexcept(b()) || noexcept(c());
/// \endcode
/// cxxNoexceptExpr()
/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
/// doesn't match the noexcept specifier in the declarations a, b, c or d.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new or delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
/// for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // A for statement may omit its increment entirely, e.g. 'for (;;)'.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
/// for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The init statement is optional, e.g. 'for (; i < N; ++i)'.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the loop variable of a range-based for statement.
///
/// Example:
/// cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  // getLoopVariable() can be null for dependent/invalid range-for statements.
  if (const VarDecl *LoopVar = Node.getLoopVariable())
    return InnerMatcher.matches(*LoopVar, Finder, Builder);
  return false;
}
/// Matches the range initialization statement of a for loop.
///
/// Example:
/// cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  // getRangeInit() can be null for dependent/invalid range-for statements.
  if (const Expr *RangeExpr = Node.getRangeInit())
    return InnerMatcher.matches(*RangeExpr, Finder, Builder);
  return false;
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *bar;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference another expressions and can be met
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
/// staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
/// matches '{ [2].y = 1.0, [0].x = 1.0 }',
/// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // DesignatedInitExpr::size() is the number of designators present.
  const unsigned DesignatorCount = Node.size();
  return DesignatorCount == N;
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches any node regardless of the submatchers.
///
/// However, \c optionally will generate a result binding for each matching
/// submatcher.
///
/// Useful when additional information which may or may not present about a
/// main matching node is desired.
///
/// For example, in:
/// \code
/// class Foo {
/// int bar;
/// }
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(
/// optionally(has(
/// fieldDecl(hasName("bar")).bind("var")
/// ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
1, std::numeric_limits<unsigned>::max()>
optionally;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
/// expressions.
///
/// Given
/// \code
///   Foo x = bar;
///   int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
///   matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   UnaryExprOrTypeTraitExpr>
    unaryExprOrTypeTraitExpr;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
///   int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
///   matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Run the inner type matcher directly on the operand's type.
  return InnerMatcher.matches(Node.getTypeOfArgument(), Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
///   int x;
///   int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
///   matches \c sizeof(x)
///
/// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  return Node.getKind() == Kind;
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
///
/// Note: also matches the compiler's "preferred" alignof variant
/// (\c UETT_PreferredAlignOf), as used for \c __alignof__.
inline internal::Matcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)),
            InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::Matcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Restrict to UETT_SizeOf and then apply the caller's matcher.
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  // Single-name case of hasAnyName: build the matcher with a one-element list.
  return internal::Matcher<NamedDecl>(
      new internal::HasNameMatcher({std::string(Name)}));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
///   hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
///   anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
                                        internal::hasAnyNameFunc>
    hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  // Prefix "::" so anchored patterns like "::X" can match top-level names.
  const std::string QualifiedName = "::" + Node.getQualifiedNameAsString();
  return llvm::Regex(RegExp).match(QualifiedName);
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
///   class A { int operator*(); };
///   const A &operator<<(const A &a, const A &b);
///   A a;
///   a << a;   // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<")) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, StringRef,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  // The polymorphic matcher type must be spelled out to restrict the
  // supported node kinds to CXXOperatorCallExpr and FunctionDecl.
  return internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, StringRef,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(Name);
}
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
///   @interface NSObject @end
///   @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false);
  // The node must be an Objective-C class; the supported-types list above
  // guarantees it is one of the two.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
///
/// An empty \p BaseName never matches.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  if (BaseName.empty())
    return false;
  // Delegate to the Matcher<NamedDecl> overload via hasName.
  const auto Inner = isDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // "Same or derived": the class itself matching Base is enough.
  const auto SameOrDerived = anyOf(Base, isDerivedFrom(Base));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(SameOrDerived).matches(*Record, Finder,
                                                         Builder);
  return Matcher<ObjCInterfaceDecl>(SameOrDerived)
      .matches(*cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
///
/// An empty \p BaseName never matches.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  if (BaseName.empty())
    return false;
  // Delegate to the Matcher<NamedDecl> overload via hasName.
  const auto Inner = isSameOrDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);
  // The node must be an Objective-C class; the supported-types list above
  // guarantees it is one of the two.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
///
/// An empty \p BaseName never matches.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  if (BaseName.empty())
    return false;
  // Delegate to the Matcher<NamedDecl> overload via hasName.
  const auto Inner = isDirectlyDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
///   class A { void func(); };
///   class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Scan the record's methods in declaration order; stop at the first match.
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
///   auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
///   (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
///   class Y { class X {}; };
///   class Z { class Y { class X {}; }; };  // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
///     (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
///   class Y { class X {}; };
///   class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasDescendantMatcher>
    hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
///   (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};
///   class Y { class X {}; };  // Matches Y, because Y::X is a class of name X
///                             // inside Y.
///   class Z { class Y { class X {}; }; };  // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
    forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
///   (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};
///   class A { class X {}; };  // Matches A, because A::X is a class of name
///                             // X inside A.
///   class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
///   cxxRecordDecl(forEachDescendant(cxxRecordDecl(
///     forEachDescendant(cxxRecordDecl())
///   )))
/// will match 10 times (plus injected class name matches) on:
/// \code
///   class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::ForEachDescendantMatcher>
    forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
///   class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(hasName("::A"),
///                 findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // "Node itself or any descendant": combine eachOf with forEachDescendant
  // so both the root match and every descendant match produce a result.
  return eachOf(Matcher, forEachDescendant(Matcher));
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasParentMatcher,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
    hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasAncestorMatcher,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
    hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
///   class X {};
///   class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
///   class X {};
///   typedef X Y;
///   Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
///   varDecl(hasType(hasUnqualifiedDesugaredType(
///       recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
///   Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
///   Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
///   Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
///   Matcher<TagType>, Matcher<TemplateSpecializationType>,
///   Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
///   Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasDeclarationMatcher, internal::Matcher<Decl>,
    void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  // The third template argument restricts this polymorphic matcher to the
  // node kinds listed in HasDeclarationSupportedTypes.
  return internal::PolymorphicMatcherWithParam1<
      internal::HasDeclarationMatcher, internal::Matcher<Decl>,
      void(internal::HasDeclarationSupportedTypes)>(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
///   namespace N { template<class T> void f(T t); }
///   template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
///     namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
///   matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  if (const NamedDecl *Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y {};
///   void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
///   matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
///   matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip parens and implicit casts so the matcher sees the written receiver.
  const Expr *Receiver =
      Node.getImplicitObjectArgument()->IgnoreParenImpCasts();
  if (Receiver == nullptr)
    return false;
  return InnerMatcher.matches(*Receiver, Finder, Builder);
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
///   NSString *webViewJavaScript = ...
///   UIWebView *webView = ...
///   [webView stringByEvaluatingJavaScriptFromString:webViewJavascript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Match the inner type matcher against the receiver's QualType.
  return InnerMatcher.matches(Node.getReceiverType(), Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
///   [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
/// but not
/// \code
///   [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // Class messages have no instance receiver: getInstanceReceiver() is null.
  const Expr *Receiver = Node.getInstanceReceiver();
  if (Receiver == nullptr)
    return false;
  return InnerMatcher.matches(*Receiver->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // Direct string equality against the spelled-out selector.
  return Node.getSelector().getAsString() == BaseName;
}
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
///   [myObj methodA:argA];
///   [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
                                        StringRef,
                                        internal::hasAnySelectorFunc>
    hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
///
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) {
  assert(!RegExp.empty());
  return llvm::Regex(RegExp).match(Node.getSelector().getAsString());
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
///   UIWebView *webView = ...;
///   CGRect bodyFrame = webView.frame;
///   bodyFrame.size.height = self.bodyContentHeight;
///   webView.frame = bodyFrame;
///   //     ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
///  matcher = objCMessageExpr(numSelectorArgs(0));
///  matches self.bodyView in the code below
///
///  matcher = objCMessageExpr(numSelectorArgs(2));
///  matches the invocation of "loadHTMLString:baseURL:" but not that
///  of self.bodyView
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  return Node.getSelector().getNumArgs() == N;
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
///   class Y { void x() { this->x(); x(); Y y; y.x(); } };
///   void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
///   matches this->x(), x(), y.x(), f()
/// with callee(...)
///   matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  if (const Expr *Callee = Node.getCallee())
    return InnerMatcher.matches(*Callee, Finder, Builder);
  return false;
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
///                                    cxxMethodDecl(hasName("x")))))
/// \code
///   class Y { public: void x(); };
///   void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  // Reuse hasDeclaration: the callee's declaration is the call's associated
  // declaration.
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
///             and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
///             and U (matcher = typedefDecl(hasType(asString("int"))))
///             and friend class X (matcher = friendDecl(hasType("X")))
/// \code
///  class X {};
///  void y(X &x) { x; X z; }
///  typedef int U;
///  class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // Nodes without a resolvable type (null QualType) never match.
  const QualType QT = internal::getUnderlyingType(Node);
  return !QT.isNull() && InnerMatcher.matches(QT, Finder, Builder);
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
///             and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
///             and friend class X (matcher = friendDecl(hasType("X")))
/// \code
///  class X {};
///  void y(X &x) { x; X z; }
///  class Y { friend class X; };
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<ValueDecl>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  // Nodes without a resolvable type (null QualType) never match.
  const QualType QT = internal::getUnderlyingType(Node);
  return !QT.isNull() &&
         qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder);
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
///   int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
///   matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  // Implicit declarations (e.g. implicit destructors) have no type source
  // info; they never match.
  if (const auto *TSI = Node.getTypeSourceInfo())
    return Inner.matches(TSI->getTypeLoc(), Finder, Builder);
  return false;
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
///   class Y { public: void x(); };
///   void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
///   matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  // Compare against the type's printed spelling (QualType::getAsString).
  return Name == Node.getAsString();
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
///   (matcher = cxxMemberCallExpr(on(hasType(pointsTo(
///      cxxRecordDecl(hasName("Y")))))))
/// \code
///   class Y { public: void x(); };
///   void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  // Null or non-pointer types never match.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Wrap the Decl matcher in hasDeclaration and reuse the QualType overload.
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
///   class A {};
///   using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  // Strip all sugar (typedefs, etc.) and qualifiers before matching.
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
///     (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
///   class X {
///     void a(X b) {
///       X &x = b;
///       const X &y = b;
///     }
///   };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // Null or non-reference types never match.
  if (Node.isNull() || !Node->isReferenceType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
///   typedef int &int_ref;
///   int a;
///   int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // A null QualType has no canonical type; never match.
  return !Node.isNull() &&
         InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Wrap the Decl matcher in hasDeclaration and reuse the QualType overload.
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // Unlike `on`, do NOT strip parens/implicit casts from the argument.
  const Expr *ImplicitArg = Node.getImplicitObjectArgument();
  if (ImplicitArg == nullptr)
    return false;
  return InnerMatcher.matches(*ImplicitArg, Finder, Builder);
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept the implicit object argument if its type matches directly, or if
  // it is a pointer whose pointee type matches.
  const auto TypeOrPointee =
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)));
  return onImplicitObjectArgument(TypeOrPointee).matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  // Same shape as the QualType overload: the implicit object argument's type,
  // or the type it points to, must satisfy the declaration matcher (hasType
  // accepts a Matcher<Decl> directly).
  return onImplicitObjectArgument(
             anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  if (const Decl *Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references resolved through a using-declaration have a
  // UsingShadowDecl as their found declaration.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  // Succeeds on the first declaration in the overload set that matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
                                    Node.decls_end(), Finder, Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  // Statements declaring several entities at once never match.
  if (!Node.isSingleDecl())
    return false;
  return InnerMatcher.matches(*Node.getSingleDecl(), Finder, Builder);
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  // NOTE(review): getAnyInitializer() presumably also finds an initializer
  // attached to another declaration of the same variable — confirm against
  // the VarDecl documentation.
  if (const Expr *Init = Node.getAnyInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  // Function-local variable declared 'static' (see example above).
  return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  // Non-static local variable, per VarDecl::hasLocalStorage().
  return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  // Complement of hasLocalStorage(): globals and static locals.
  return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  // Compares the storage duration against the SD_Automatic enumerator.
  return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
/// matches the declarations of y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  // Covers namespace-scope variables as well as 'static'/'extern' ones.
  return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  // e.g. variables declared thread_local (see example above).
  return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable()))
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  // The variable bound in a C++ catch clause or ObjC @catch statement.
  return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                          CXXConstructExpr,
                                                          ObjCMessageExpr),
                          unsigned, N) {
  // getNumArgs() includes defaulted arguments omitted at the call site
  // (see the doc comment above).
  return Node.getNumArgs() == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr,
                                                           ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  if (N >= Node.getNumArgs())
    return false;
  // Parentheses and implicit casts around the argument are stripped before
  // matching.
  return InnerMatcher.matches(*Node.getArg(N)->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
/// int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices never match.
  if (N >= Node.getNumInits())
    return false;
  return InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // Cast N to the iterator difference type to avoid a signed/unsigned
  // comparison.
  const auto DeclCount = std::distance(Node.decl_begin(), Node.decl_end());
  return DeclCount == static_cast<ptrdiff_t>(N);
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
/// matches 'int a, b = 0' as well as 'int d = 2, e;'
/// but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Bounds-check first; decl iterators are forward-only so we count and then
  // step to the N'th element.
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  return InnerMatcher.matches(**std::next(Node.decl_begin(), N), Finder,
                              Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // catch (...) is represented with a null exception declaration.
  return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  // Succeeds on the first initializer in the ctor-initializer list that
  // matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
                                    Node.init_end(), Finder, Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // getAnyMember() may return null (e.g. when the initializer does not name
  // a field); guard before matching.
  if (const FieldDecl *Member = Node.getAnyMember())
    return InnerMatcher.matches(*Member, Finder, Builder);
  return false;
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
  // Explicitly-written initializer, as opposed to compiler-synthesized ones.
  return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
  // Initializer for a base class subobject, e.g. `E() : B() {}`.
  return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
  // Initializer for a data member, e.g. `D(int i) : I(i) {}`.
  return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  for (const Expr *Argument : Node.arguments()) {
    // Match against a scratch builder so bindings from failed attempts do
    // not leak into the final result.
    BoundNodesTreeBuilder Scratch(*Builder);
    if (InnerMatcher.matches(*Argument, Finder, &Scratch)) {
      *Builder = std::move(Scratch);
      return true;
    }
  }
  return false;
}
/// Matches any capture of a lambda expression.
///
/// Given
/// \code
/// void foo() {
/// int x;
/// auto f = [x](){};
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
/// matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
                       InnerMatcher, 0) {
  for (const LambdaCapture &LC : Node.captures()) {
    // Only variable captures carry a VarDecl; skip 'this' and VLA captures.
    if (!LC.capturesVariable())
      continue;
    // Use a scratch builder so failed attempts leave no stray bindings.
    BoundNodesTreeBuilder Scratch(*Builder);
    if (InnerMatcher.matches(*LC.getCapturedVar(), Finder, &Scratch)) {
      *Builder = std::move(Scratch);
      return true;
    }
  }
  return false;
}
/// Matches any capture of 'this' in a lambda expression.
///
/// Given
/// \code
/// struct foo {
/// void bar() {
/// auto f = [this](){};
/// }
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
/// matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
                       internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
  // InnerMatcher only selects this overload; any capture of 'this'
  // satisfies the matcher regardless of its contents.
  for (const LambdaCapture &LC : Node.captures())
    if (LC.capturesThis())
      return true;
  return false;
}
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  // True when the construction was written with braces, e.g. T t{...}.
  return Node.isListInitialization();
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  // e.g. the implicit filler for trailing array elements (see example above).
  return Node.requiresZeroInitialization();
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  const auto Params = Node.parameters();
  if (N >= Params.size())
    return false;
  return InnerMatcher.matches(*Params[N], Finder, Builder);
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Collects one bound-node set per argument/parameter pair that matches;
  // all sets are installed into Builder at the end.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                      ? 1
                      : 0;
  // ParamIndex tracks the callee parameter for the current argument; it lags
  // ArgIndex by one exactly when the implicit object argument was skipped.
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Locate the callee's parameter list: through the constructor for a
      // CXXConstructExpr, through the callee declaration for a CallExpr.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
/// b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  // Succeeds on the first declared parameter that matches; the implicit
  // 'this' is not part of the parameter range.
  return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
                                    Node.param_end(), Finder, Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  // For variadic functions only the named parameters are counted (see the
  // 'k' example above).
  return Node.getNumParams() == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
// Covers both [[noreturn]] and __attribute__((noreturn)) — see the examples
// in the doc comment above.
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); }
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  // Every function declaration has a return type, so no null check is needed.
  const QualType ReturnType = Node.getReturnType();
  return InnerMatcher.matches(ReturnType, Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
/// matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
/// matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                                   VarDecl)) {
  // Covers declarations inside extern "C" blocks as well (see examples above).
  return Node.isExternC();
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
/// static void f() {}
/// static int i = 0;
/// extern int j;
/// int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
/// matches the function declaration f.
/// varDecl(isStaticStorageClass())
/// matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  // Requires the 'static' keyword as written; static storage duration alone
  // is not enough.
  return Node.getStorageClass() == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
/// void Func();
/// void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
/// matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  // Declared with '= delete'.
  return Node.isDeleted();
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
/// class A { ~A(); };
/// class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
/// matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  // Declared with '= default'.
  return Node.isDefaulted();
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Declarations without a prototype carry no exception specification at all.
  const FunctionProtoType *Proto = internal::getFunctionProtoType(Node);
  return Proto != nullptr && Proto->hasDynamicExceptionSpec();
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *Proto = internal::getFunctionProtoType(Node);
  // A function without a prototype has no exception specification and is
  // therefore treated as potentially throwing.
  if (Proto == nullptr)
    return false;
  // Unresolved exception specifications are optimistically assumed to be
  // non-throwing.
  if (isUnresolvedExceptionSpec(Proto->getExceptionSpecType()))
    return true;
  return Proto->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  // VarDecl, FunctionDecl and IfStmt each expose an isConstexpr() accessor.
  return Node.isConstexpr();
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
/// void foo() {
/// if (int i = foobar(); i > 0) {}
/// switch (int i = foobar(); i) {}
/// for (auto& a = get_range(); auto& x : a) {}
/// }
/// void bar() {
/// if (foobar() > 0) {}
/// switch (foobar()) {}
/// for (auto& x : get_range()) {}
/// }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
/// matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
/// matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
/// matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // getInit() is null when no init-statement was written.
  if (const Stmt *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // The condition may be absent, e.g. in `for (;;)`.
  if (const Expr *Cond = Node.getCond())
    return InnerMatcher.matches(*Cond, Finder, Builder);
  return false;
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  if (const Stmt *ThenBranch = Node.getThen())
    return InnerMatcher.matches(*ThenBranch, Finder, Builder);
  return false;
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  // getElse() is null when no else branch was written.
  if (const Stmt *ElseBranch = Node.getElse())
    return InnerMatcher.matches(*ElseBranch, Finder, Builder);
  return false;
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSepcifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  // NOTE(review): removeBindings() appears to drop every bound-node set for
  // which the node bound to ID differs from the current node, returning
  // whether any set survives — confirm against BoundNodesTreeBuilder docs.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = ast_type_traits::DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Null unless the condition declares a variable.
  if (const DeclStmt *CondVar = Node.getConditionVariableDeclStmt())
    return InnerMatcher.matches(*CondVar, Finder, Builder);
  return false;
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Index = Node.getIdx();
  return Index != nullptr && InnerMatcher.matches(*Index, Finder, Builder);
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
/// hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Base = Node.getBase();
  return Base != nullptr && InnerMatcher.matches(*Base, Finder, Builder);
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // GetBodyMatcher abstracts over how each supported node kind exposes its
  // body; the result may be null (e.g. a declaration without a definition).
  if (const Stmt *Body = internal::GetBodyMatcher<NodeType>::get(Node))
    return InnerMatcher.matches(*Body, Finder, Builder);
  return false;
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
/// { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // CompoundStmtMatcher extracts the CompoundStmt directly, or out of a
  // StmtExpr; the result may be null. Succeeds on the first substatement
  // that matches.
  const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
  return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
                                          CS->body_end(), Finder, Builder);
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  // Counts statements directly inside the braces, not nested ones.
  return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  // ValueT is deduced from the argument; the polymorphic wrapper
  // instantiates ValueEqualsMatcher for whichever literal node type the
  // matcher is ultimately applied to.
  return internal::PolymorphicMatcherWithParam1<
    internal::ValueEqualsMatcher,
    ValueT>(Value);
}
// Overload so that a plain `true`/`false` argument selects bool comparison.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   bool, Value, 0) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}
// Overload so that an integer literal argument selects unsigned comparison.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   unsigned, Value, 1) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}
// Overload for floating-point arguments; also supports FloatingLiteral.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   FloatingLiteral,
                                                                   IntegerLiteral),
                                   double, Value, 2) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
/// !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  // Compares against the operator's source spelling, e.g. "||" or "!".
  return Name == Node.getOpcodeStr(Node.getOpcode());
}
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; })
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
CXXOperatorCallExpr)) {
return Node.isAssignmentOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
ArraySubscriptExpr),
internal::Matcher<Expr>, InnerMatcher) {
const Expr *LeftHandSide = Node.getLHS();
return (LeftHandSide != nullptr &&
InnerMatcher.matches(*LeftHandSide, Finder, Builder));
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
ArraySubscriptExpr),
internal::Matcher<Expr>, InnerMatcher) {
const Expr *RightHandSide = Node.getRHS();
return (RightHandSide != nullptr &&
InnerMatcher.matches(*RightHandSide, Finder, Builder));
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
    const internal::Matcher<Expr> &InnerMatcher) {
  // Build the per-side matchers separately, then combine them disjunctively.
  const auto MatchesLHS = hasLHS(InnerMatcher);
  const auto MatchesRHS = hasRHS(InnerMatcher);
  return anyOf(MatchesLHS, MatchesRHS);
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
///   cxxBoolLiteral(equals(true))))
/// \code
///   !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
              internal::Matcher<Expr>, InnerMatcher) {
  // Guard against a null sub-expression before running the inner matcher.
  const Expr * const Operand = Node.getSubExpr();
  return (Operand != nullptr &&
          InnerMatcher.matches(*Operand, Finder, Builder));
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
///   class URL { URL(string); };
///   URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
///   int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // GetSourceExpressionMatcher dispatches on the concrete node type to fetch
  // the appropriate source sub-expression (may be null).
  const Expr *const SubExpression =
      internal::GetSourceExpressionMatcher<NodeType>::get(Node);
  return (SubExpression != nullptr &&
          InnerMatcher.matches(*SubExpression, Finder, Builder));
}
/// Matches casts that has a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
///   int *p = 0;
/// \endcode
///
/// If the matcher is use from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., ofKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  return Node.getCastKind() == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Uses the type as written in the cast, not the semantic result type.
  const QualType NodeType = Node.getTypeAsWritten();
  return InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
/// Matches TagDecl object that are spelled with "struct."
///
/// Note: these four tag-kind matchers distinguish declarations by the tag
/// keyword they are spelled with, not by any semantic property.
///
/// Example matches S, but not C, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
  return Node.isStruct();
}
/// Matches TagDecl object that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
  return Node.isUnion();
}
/// Matches TagDecl object that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
  return Node.isClass();
}
/// Matches TagDecl object that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) {
  return Node.isEnum();
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
///   condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // Guard against a null true-branch before running the inner matcher.
  const Expr *Expression = Node.getTrueExpr();
  return (Expression != nullptr &&
          InnerMatcher.matches(*Expression, Finder, Builder));
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
///   condition ? a : b
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // Guard against a null false-branch before running the inner matcher.
  const Expr *Expression = Node.getFalseExpr();
  return (Expression != nullptr &&
          InnerMatcher.matches(*Expression, Finder, Builder));
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
///   class A {};
///   class B;  // Doesn't match, as it has no body.
///   int va;
///   extern int vb;  // Doesn't match, as it doesn't define the variable.
///   void fa() {}
///   void fb();  // Doesn't match, as it has no body.
///   @interface X
///   - (void)ma; // Doesn't match, interface is declaration.
///   @end
///   @implementation X
///   - (void)ma {}
///   @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
///   Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  // Only *this* redeclaration must be the definition, not any redeclaration.
  return Node.isThisDeclarationADefinition();
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
///   void f(...);
///   void g(int);
///   template <typename... Ts> void h(Ts...);
///   void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
  return Node.isVariadic();
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
///     (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
///         ofClass(hasName("A"))))))
/// \code
///   class A {
///    public:
///     A();
///   };
///   A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  // Guard against a null parent before running the inner matcher on it.
  const CXXRecordDecl *Parent = Node.getParent();
  return (Parent != nullptr &&
          InnerMatcher.matches(*Parent, Finder, Builder));
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
///   class A { virtual void f(); };
///   class B : public A { void f(); };
///   class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
///   that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
///   class A1 { virtual void f(); };
///   class A2 { virtual void f(); };
///   class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
///   once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  // Collect one bound-node set per overridden method that the inner matcher
  // accepts, then replace the caller's builder with the accumulated matches.
  BoundNodesTreeBuilder Matches;
  bool AnyMatched = false;
  for (const auto *Overridden : Node.overridden_methods()) {
    // Each candidate gets its own copy of the incoming bindings.
    BoundNodesTreeBuilder CandidateBuilder(*Builder);
    if (InnerMatcher.matches(*Overridden, Finder, &CandidateBuilder)) {
      AnyMatched = true;
      Matches.addMatch(CandidateBuilder);
    }
  }
  *Builder = std::move(Matches);
  return AnyMatched;
}
/// Matches if the given method declaration is virtual.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
/// \endcode
///   matches A::x
AST_MATCHER(CXXMethodDecl, isVirtual) {
  return Node.isVirtual();
}
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     void x();
///   };
/// \endcode
///   matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  return Node.isVirtualAsWritten();
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
///   class A final {};
///
///   struct B {
///     virtual void f();
///   };
///
///   struct C : B {
///     void f() final;
///   };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  // "final" is represented as FinalAttr in the AST; `.template` is required
  // because hasAttr is a dependent member template here.
  return Node.template hasAttr<FinalAttr>();
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x() = 0;
///   };
/// \endcode
///   matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
  return Node.isPure();
}
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
///   void foo() const;
///   void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
  return Node.isConst();
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
///   A &operator=(const A &);
///   A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
  return Node.isCopyAssignmentOperator();
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
///   A &operator=(const A &);
///   A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  return Node.isMoveAssignmentOperator();
}
/// Matches if the given method declaration overrides another method.
///
/// Also matches a method that merely carries the `override` attribute even if
/// no overridden method is recorded (see the second operand of the check).
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     virtual void x();
///   };
/// \endcode
///   matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(const S &) = default; // #2
///     S(S &&) = delete; // #3
///   };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
///   class Y {
///     void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
///     template <class T> void f() { this->f<T>(); f<T>(); }
///     int a;
///     static int b;
///   };
///   template <class T>
///   class Z {
///     void x() { this->m; }
///   };
/// \endcode
/// memberExpr(isArrow())
///   matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
///   matches this->m
/// unresolvedMemberExpr(isArrow())
///   matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Note: in this group, `Node->...` dereferences the QualType to query the
/// underlying Type, while `Node.is...Qualified()` (below) queries the
/// qualifiers on the QualType itself.
///
/// Given
/// \code
///   void a(int);
///   void b(long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
  return Node->isIntegerType();
}
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
  return Node->isUnsignedIntegerType();
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
  return Node->isSignedIntegerType();
}
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
///   void a(char);
///   void b(wchar_t);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
  return Node->isAnyCharacterType();
}
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
///   int *i = nullptr;
///
///   @interface Foo
///   @end
///   Foo *f;
///
///   int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
///   matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
  return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
///   void a(int);
///   void b(int const);
///   void c(const int);
///   void d(const int*);
///   void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
///   matches "void b(int const)", "void c(const int)" and
///   "void e(int const) {}". It does not match d as there
///   is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
  return Node.isConstQualified();
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
///   void a(int);
///   void b(int volatile);
///   void c(volatile int);
///   void d(volatile int*);
///   void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
///   matches "void b(int volatile)", "void c(volatile int)" and
///   "void e(int volatile) {}". It does not match d as there
///   is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
///   typedef const int const_int;
///   const_int i;
///   int *const j;
///   int *volatile k;
///   int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
///   struct { int first, second; } first, second;
///   int i(second.first);
///   int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
///   matches second.first
///   but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  // Note: the member declaration is dereferenced unconditionally here
  // (no null guard, unlike the operand matchers above).
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // Implicit accesses of unresolved/dependent members have no written object
  // expression, so they are rejected rather than matched against `this`.
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b"))))
///   matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Succeeds if any shadow declaration in [shadow_begin, shadow_end)
  // satisfies the inner matcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
///   namespace X { int a; void b(); }
///   using X::a;
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
///   matches \code using X::b \endcode
///   but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
///   template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   matches the template instantiation of X<A>.
///
/// But given
/// \code
///   template <typename T>  class X {}; class A {};
///   template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Query the specialization kind once and accept any of the three
  // instantiation kinds (implicit, explicit definition, explicit declaration).
  const auto Kind = Node.getTemplateSpecializationKind();
  return Kind == TSK_ImplicitInstantiation ||
         Kind == TSK_ExplicitInstantiationDefinition ||
         Kind == TSK_ExplicitInstantiationDeclaration;
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
///   template<typename T> void A(T t) { T i; }
///   A(0);
///   A(0U);
/// \endcode
/// functionDecl(isInstantiated())
///   matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A declaration qualifies if it is itself an instantiated class or
  // function, or has such an instantiation as an ancestor.
  auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                    functionDecl(isTemplateInstantiation())));
  return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
///   int j;
///   template<typename T> void A(T t) { T i; j += 42;}
///   A(0);
///   A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
///   matches 'int i;' and 'unsigned i;'.
/// unless(stmt(isInTemplateInstantiation()))
///   will NOT match j += 42; as it's shared between the template definition and
///   instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  return stmt(
      hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                             functionDecl(isTemplateInstantiation())))));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
///   template<typename T> void A(T t) { }
///   template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
///   matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
                                internal::Matcher<QualType>, InnerMatcher, 0) {
  return internal::BindableMatcher<TypeLoc>(
      new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
///  struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
///   matches "bool func();"
AST_MATCHER(Type, booleanType) {
  return Node.isBooleanType();
}
/// Matches type \c void.
///
/// Given
/// \code
///  struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
///   matches "void func();"
AST_MATCHER(Type, voidType) {
  return Node.isVoidType();
}
/// Shorthand for a variadic dyn-cast matcher over \c Type nodes; used by the
/// node-matcher declarations below (builtinType, arrayType, ...).
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
///   struct A {};
///   A a;
///   int b;
///   float c;
///   bool d;
/// \endcode
/// builtinType()
///   matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
///   int a[] = { 2, 3 };
///   int b[4];
///   void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
///   matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
///   _Complex float f;
/// \endcode
/// complexType()
///   matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
///   int i;
///   float f;
/// \endcode
/// realFloatingPointType()
///   matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
  return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
///   struct A {};
///   A a[7];
///   int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
///   matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
                                  AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
                                                                  ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
///   void f() {
///     int a[2];
///     int b[] = { 2, 3 };
///     int c[b[0]];
///   }
/// \endcode
/// constantArrayType()
///   matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
///   int a[42];
///   int b[2 * 21];
///   int c[41], d[43];
///   char *s = "abcd";
///   wchar_t *ws = L"abcd";
///   char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
///   matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
///   matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
                                                          StringLiteral),
                          unsigned, N) {
  // The HasSizeMatcher trait dispatches on the node type (array element
  // count vs. string literal length).
  return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
///   template<typename T, int Size>
///   class array {
///     T data[Size];
///   };
/// \endcode
/// dependentSizedArrayType
///   matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
///   int a[] = { 2, 3 };
///   int b[42];
///   void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
///   matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
///   void f() {
///     int a[] = { 2, 3 };
///     int b[42];
///     int c[a[0]];
///   }
/// \endcode
/// variableArrayType()
///   matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
///   void f(int b) {
///     int a[b];
///   }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
///   varDecl(hasName("b")))))))
///   matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
///   _Atomic(int) i;
/// \endcode
/// atomicType()
///   matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
///   _Atomic(int) i;
///   _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
///  matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
                                  AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
///   auto n = 4;
///   int v[] = { 2, 3 };
///   for (auto i : v) { }
/// \endcode
/// autoType()
///   matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
///   short i = 1;
///   int j = 42;
///   decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
///   matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
///   auto a = 1;
///   auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
///   matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
///   decltype(1) a = 1;
///   decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
///   matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
///   int (*f)(int);
///   void g();
/// \endcode
/// functionType()
///   matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
///   int (*f)(int);
///   void g();
/// \endcode
/// functionProtoType()
///   matches "int (*f)(int)" and the type of "g" in C++ mode.
///   In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
///   int (*ptr_to_array)[4];
///   int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
///   int (*ptr_to_array)[4];
///   int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
///   struct A { int i; };
///   int A::* ptr = &A::i;
/// \endcode
/// memberPointerType()
///   matches "int A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
///   int *a;
///   int &b = *a;
///   int c = 5;
///
///   @interface Foo
///   @end
///   Foo *f;
/// \endcode
/// pointerType()
///   matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
///   int *a;
///
///   @interface Foo
///   @end
///   Foo *f;
/// \endcode
/// objcObjectPointerType()
///   matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
///   int *a;
///   int const *b;
///   float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
///   matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
///   Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
    pointee, getPointee,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
                                    PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
///   typedef int X;
/// \endcode
/// typedefType()
///   matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
//
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
/// template <typename T>
/// class C { public: C(T); };
///
/// C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
deducedTemplateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
if (const NestedNameSpecifier *Qualifier = Node.getQualifier())
return InnerMatcher.matches(*Qualifier, Finder, Builder);
return false;
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
InnerMatcher) {
return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whos decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
InnerType) {
return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// \endcode
///
/// \c cxxRcordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
const DeclContext *DC = Node.getDeclContext();
if (!DC) return false;
return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder);
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
return internal::BindableMatcher<NestedNameSpecifierLoc>(
new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
internal::Matcher<QualType>, InnerMatcher) {
if (!Node.getAsType())
return false;
return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
internal::Matcher<TypeLoc>, InnerMatcher) {
return Node && Node.getNestedNameSpecifier()->getAsType() &&
InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
internal::Matcher<NestedNameSpecifier>, InnerMatcher,
0) {
const NestedNameSpecifier *NextNode = Node.getPrefix();
if (!NextNode)
return false;
return InnerMatcher.matches(*NextNode, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
1) {
NestedNameSpecifierLoc NextNode = Node.getPrefix();
if (!NextNode)
return false;
return InnerMatcher.matches(NextNode, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
internal::Matcher<NamespaceDecl>, InnerMatcher) {
if (!Node.getAsNamespace())
return false;
return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
InnerMatcher) {
BoundNodesTreeBuilder Result;
// FIXME: getSwitchCaseList() does not necessarily guarantee a stable
// iteration order. We should use the more general iterating matchers once
// they are capable of expressing this matcher (for example, it should ignore
// case statements belonging to nested switch statements).
bool Matched = false;
for (const SwitchCase *SC = Node.getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase()) {
BoundNodesTreeBuilder CaseBuilder(*Builder);
bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder);
if (CaseMatched) {
Matched = true;
Result.addMatch(CaseBuilder);
}
}
*Builder = std::move(Result);
return Matched;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
BoundNodesTreeBuilder Result;
bool Matched = false;
for (const auto *I : Node.inits()) {
BoundNodesTreeBuilder InitBuilder(*Builder);
if (InnerMatcher.matches(*I, Finder, &InitBuilder)) {
Matched = true;
Result.addMatch(InitBuilder);
}
}
*Builder = std::move(Result);
return Matched;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
CXXConstructorDecl, CXXConversionDecl,
CXXDeductionGuideDecl)) {
return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
InnerMatcher) {
ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node);
if (!ES.getExpr())
return false;
return InnerMatcher.matches(*ES.getExpr(), Finder, Builder);
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
FunctionDecl)) {
// This is required because the spelling of the function used to determine
// whether inline is specified or not differs between the polymorphic types.
if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
return FD->isInlineSpecified();
else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
return NSD->isInline();
llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
return Node.isAnonymousNamespace();
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
InnerMatcher) {
if (Node.getRHS())
return false;
return InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
for (const auto *Attr : Node.attrs()) {
if (Attr->getKind() == AttrKind)
return true;
}
return false;
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
InnerMatcher) {
if (const auto *RetValue = Node.getRetValue())
return InnerMatcher.matches(*RetValue, Finder, Builder);
return false;
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER(Expr, nullPointerConstant) {
return Node.isNullPointerConstant(Finder->getASTContext(),
Expr::NPC_ValueDependentIsNull);
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
InnerMatcher) {
const auto &Parents = Finder->getASTContext().getParents(Node);
llvm::SmallVector<ast_type_traits::DynTypedNode, 8> Stack(Parents.begin(),
Parents.end());
while(!Stack.empty()) {
const auto &CurNode = Stack.back();
Stack.pop_back();
if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
return true;
}
} else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
Finder, Builder)) {
return true;
}
} else {
for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
Stack.push_back(Parent);
}
}
return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal likage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
return Node.hasExternalFormalLinkage();
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument. For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
/// matches the parameter of y
///
/// A matcher such as
/// parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
return Node.isArray();
}
/// Matches placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
internal::Matcher<Expr>, InnerMatcher) {
return Node.getNumPlacementArgs() > Index &&
InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}
/// Matches any placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
InnerMatcher) {
return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) {
return InnerMatcher.matches(*Arg, Finder, Builder);
});
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
return Node.isArray() && *Node.getArraySize() &&
InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
return Node.hasDefinition();
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
return Node.isScoped();
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
if (const auto *F = Node.getType()->getAs<FunctionProtoType>())
return F->hasTrailingReturn();
return false;
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
// E tracks the node that we are examining.
const Expr *E = &Node;
// If present, remove an outer `ExprWithCleanups` corresponding to the
// underlying `CXXConstructExpr`. This check won't cover all cases of added
// `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
// EWC is placed on the outermost node of the expression, which this may not
// be), but, it still improves the coverage of this matcher.
if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
E = CleanupsExpr->getSubExpr();
if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
if (CtorExpr->isElidable()) {
if (const auto *MaterializeTemp =
dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
Builder);
}
}
}
return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
return Node.isStandaloneDirective();
}
/// Matches the Stmt AST node that is marked as being the structured-block
/// of an OpenMP executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``stmt(isOMPStructuredBlock()))`` matches ``{}``.
AST_MATCHER(Stmt, isOMPStructuredBlock) { return Node.isOMPStructuredBlock(); }
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
internal::Matcher<Stmt>, InnerMatcher) {
if (Node.isStandaloneDirective())
return false; // Standalone directives have no structured blocks.
return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
internal::Matcher<OMPClause>, InnerMatcher) {
ArrayRef<OMPClause *> Clauses = Node.clauses();
return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
Clauses.end(), Finder, Builder);
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
return Node.getDefaultKind() == OMPC_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
return Node.getDefaultKind() == OMPC_DEFAULT_shared;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel for
/// #pragma omp for
/// \endcode
///
/// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches
/// ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
OpenMPClauseKind, CKind) {
return isAllowedClauseForDirective(
Node.getDirectiveKind(), CKind,
Finder->getASTContext().getLangOpts().OpenMP);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
pr27573.c | /* PR middle-end/27573 */
/* { dg-do compile } */
/* { dg-require-profiling "-fprofile-generate" } */
/* { dg-options "-O2 -fopenmp -fprofile-generate" } */
extern int puts (const char *);
int
main (void)
{
int i, j = 8;
#pragma omp parallel
{
puts ("foo");
for (i = 1; i < j - 1; i++)
;
}
return 0;
}
|
valid.yolo11.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_28272_17_17_1024_1_1.h"
#include "gen_ukr_A1B2gemm_1_28272_17_17_1024_1_1.h"
// Machine-generated ("push button") convolution driver for one layer
// (from the generator parameters: 1 batch, 28272 filters, 17x17 spatial,
// 1024 channels).  Two phases per calling thread:
//   1. Pack/transpose the original weights oriB into the blocked layout B
//      that the micro-kernels expect (16 filters interleaved per block).
//   2. Run the generated tiled loop nest and dispatch a 6x2v or 1x2v
//      scatter micro-kernel per innermost tile.
// uNf/uNc/uNw/uNh and the tile sizes Tc1/Txy3/Tf2 come from the included
// generated headers; an OpenMP barrier separates packing from compute,
// so this function is expected to be called from within a parallel
// region (TODO confirm against the caller).
void testrun(float* A ,float*B, float*C, float*oriB ){
int tid = omp_get_thread_num();
int Nx = 17;
int Ny = 17;
int Nh = 1;
// Per-row source offsets handed to the scatter micro-kernel; the
// tail-handling branches below adjust (and restore) entries in place.
long long Astrides[6] = {0,1,2,3,4,5};
int b1 = 0;
// Phase 1: each thread packs a disjoint slice of the weights.  The
// (tid%1)/(tid/1) factors are generated for a 1-way thread split here.
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
// Phase 2: generated tiled loop nest (preserved verbatim).  Levels
// iterate xy (spatial, 289 = 17*17), f (filters, 28272) and c
// (channels, 1024); most outer levels execute exactly once and exist
// only because the generator always emits the full tiling hierarchy.
#pragma omp barrier// begin push button generated block
for(int xy5=0;xy5<289+0;xy5+=289)
{
for(int f5=0;f5<28272+0;f5+=28272)
{
for(int c5=0;c5<1024+0;c5+=1024)
{
for(int c4=c5;c4<min(1024, 1024+c5);c4+=1024)
{
for(int f4=f5;f4<min(28272, 28272+f5);f4+=3072)
{
for(int xy4=xy5;xy4<min(289, 289+xy5);xy4+=289)
{
for(int c3=c4;c3<min(1024, 1024+c4);c3+=Tc1)
{
for(int xy3=xy4;xy3<min(289, 289+xy4);xy3+=Txy3)
{
for(int f3=f4;f3<min(28272, 3072+f4);f3+=Tf2)
{
for(int xy2=xy3;xy2<min(289, Txy3+xy3);xy2+=6)
{
for(int f2=f3;f2<min(28272, Tf2+f3);f2+=16)
{
for(int c2=c3;c2<min(1024, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(1024, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(289, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(28272, 16+f2);f1+=16)
{
// Decompose the flat xy index into (x1,y1) and the filter index into
// packed-block / within-block parts; the /1 and %1 terms are generator
// artifacts that fold to 0 or identity.
int ctile=min(Tc1, 1024-c1);
int x1=xy1/17;
int y1=xy1%17/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
// Linearized base offsets: 295936 = 1024*289 (input plane set),
// 16384 = 1024*16 (one packed weight block), 8170608 = 28272*289.
int offsetA=0+b1*295936+c1_1*289+1*x1*17+1*y1*1+c1_2*1;
int offsetB=0+kf1_1*16384+c1*16+0*16+0*16+kf1_2*1;
int offsetC=0+b1*8170608+of1_1*289+x1*17+y1*1+of1_2*1;
if(17-y1>=6){
// Full 6-row tile fits in the current image row.
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
else if(17*17-xy1>=6){
// Tile wraps to the next image row: temporarily adjust the stride
// table, run the kernel, then undo.  NOTE(review): the generated
// adjustment is +=0/-=0, i.e. a no-op, for this layer's geometry --
// confirm against the generator whether a nonzero wrap offset was
// intended here.
for(int sti=17-y1;sti<6;sti+=1)
{
Astrides[sti]+=0;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=17-y1;sti<6;sti+=1)
{
Astrides[sti]-=0;
}
}
else{
// Fewer than 6 spatial points remain: fall back to the 1-row kernel.
cnn_ukr_float_scatter_1x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
} |
smul.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <HiParTI.h>
/* Emit the command-line help text for this benchmark to stdout. */
static void print_usage(char ** argv) {
    static const char *const help_lines[] = {
        "Options: -X INPUT (.tns file)\n",
        " -a INPUT (a scalar)\n",
        " -Z OUTPUT (output file name)\n",
        " Parallel CPU: use 'export OMP_NUM_THREADS = [number]'\n",
        " --help\n",
        "\n",
    };
    size_t li;

    printf("Usage: %s [options] \n\n", argv[0]);
    for(li = 0; li < sizeof(help_lines) / sizeof(help_lines[0]); ++li) {
        fputs(help_lines[li], stdout);
    }
}
/**
* Benchmark COO tensor multiplication with a scalar.
*/
/**
 * Benchmark COO tensor multiplication with a scalar.
 *
 * Command line:
 *   -X INPUT   .tns tensor file to load (required)
 *   -a INPUT   the scalar multiplier
 *   -Z OUTPUT  optional file to dump the scaled tensor to
 *   -d DEV     device id; accepted for CLI parity with the other
 *              HiParTI benchmarks but ignored by this CPU-only driver
 *
 * Runs one untimed warm-up multiply, then `niters` timed iterations and
 * prints the average elapsed time.  Returns 0 on success; exits with
 * status 1 on usage errors.
 */
int main(int argc, char *argv[]) {
    FILE *fZ = NULL;
    char Xfname[1000] = "";   /* BUGFIX: was uninitialized; empty => -X missing */
    ptiValue a = 0;           /* BUGFIX: was read uninitialized if -a omitted */
    ptiSparseTensor X;
    int niters = 5;
    int nthreads;
    int dev_id = -2;          /* parsed but unused: CPU benchmark */

    ptiTimer timer;
    ptiNewTimer(&timer, 0);

    if(argc < 3) {
        print_usage(argv);
        exit(1);
    }

    static struct option long_options[] = {
        {"Xinput", required_argument, 0, 'X'},
        {"ainput", required_argument, 0, 'a'},
        {"Zoutput", optional_argument, 0, 'Z'},
        {"dev-id", optional_argument, 0, 'd'},
        {"help", no_argument, 0, 0},
        {0, 0, 0, 0}
    };
    int c;
    for(;;) {
        int option_index = 0;
        c = getopt_long(argc, argv, "X:a:Z:d:", long_options, &option_index);
        if(c == -1) {
            break;
        }
        switch(c) {
        case 'X':
            strcpy(Xfname, optarg);
            printf("X input file: %s\n", Xfname); fflush(stdout);
            break;
        case 'a':
            sscanf(optarg, "%"HIPARTI_SCN_VALUE, &a);
            break;
        case 'Z':
            fZ = fopen(optarg, "w");
            ptiAssert(fZ != NULL);
            printf("Z output file: %s\n", optarg); fflush(stdout);
            break;
        case 'd':
            /* BUGFIX: -d/--dev-id is declared in both the optstring and
               long_options but previously had no case, so passing it fell
               through to default and aborted with the usage text. */
            sscanf(optarg, "%d", &dev_id);
            (void) dev_id;
            break;
        case '?': /* invalid option */
        case 'h':
        default:
            print_usage(argv);
            exit(1);
        }
    }
    if(Xfname[0] == '\0') {
        /* BUGFIX: without this check a missing -X led to loading from an
           uninitialized path buffer. */
        print_usage(argv);
        exit(1);
    }
    printf("Scaling a: %"HIPARTI_PRI_VALUE"\n", a);
    ptiAssert(ptiLoadSparseTensor(&X, 1, Xfname) == 0);

    /* For warm-up caches, timing not included */
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel
    {
        nthreads = omp_get_num_threads();
    }
    printf("\nnthreads: %d\n", nthreads);
#endif
    ptiAssert(ptiSparseTensorMulScalar(&X, a) == 0);

    /* Timed iterations. */
    ptiStartTimer(timer);
    for(int it=0; it<niters; ++it) {
        ptiAssert(ptiSparseTensorMulScalar(&X, a) == 0);
    }
    ptiStopTimer(timer);
    ptiPrintAverageElapsedTime(timer, niters, "Average CooMulScalar");
    ptiFreeTimer(timer);

    if(fZ != NULL) {
        ptiAssert(ptiDumpSparseTensor(&X, 1, fZ) == 0);
        fclose(fZ);
    }
    ptiFreeSparseTensor(&X);
    return 0;
}
|
computeGraph.c | #include "defs.h"
/* Build the CSR-style adjacency arrays (numEdges offsets, endV targets,
 * weight values) of graph G from the edge list in SDGdata, in parallel
 * when compiled with OpenMP.  The SDGdata edge arrays are freed before
 * returning.  Returns the elapsed wall-clock seconds.  The mcsim_*
 * calls bracket regions the McSim simulator should skip or log; they
 * are no-ops outside the simulator. */
double computeGraph(graph* G, graphSDG* SDGdata)
{
    mcsim_skip_instrs_begin();
    /* Shared outputs, written by thread 0 and read by all threads after
     * the barriers below. */
    VERT_T* endV;
    LONG_T *degree, *numEdges, *pos, *pSums;
    WEIGHT_T* w;
    double elapsed_time;
#ifdef _OPENMP
    omp_lock_t *vLock;   /* one lock per vertex for the degree count */
    LONG_T chunkSize;
#endif
    elapsed_time = get_seconds();
#ifdef _OPENMP
    omp_set_num_threads(NUM_THREADS);
#endif
#ifdef _OPENMP
#pragma omp parallel
    {
#endif
        LONG_T i, j, u, n, m, tid, nthreads;
#ifdef DIAGNOSTIC
        double elapsed_time_part;
#endif
#ifdef _OPENMP
        nthreads = omp_get_num_threads();
        tid = omp_get_thread_num();
#else
        tid = 0;
        nthreads = 1;
#endif
        /* N and M are the global vertex/edge counts (from defs.h). */
        n = N;
        m = M;
        /* Thread 0 allocates the shared scratch arrays. */
        if (tid == 0) {
#ifdef _OPENMP
            vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
            assert(vLock != NULL);
            chunkSize = n/nthreads;
#endif
            pos = (LONG_T *) malloc(m*sizeof(LONG_T));
            assert(pos != NULL);
            degree = (LONG_T *) calloc(n, sizeof(LONG_T));
            assert(degree != NULL);
        }
#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds();
        }
#endif
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for schedule(static, chunkSize)
        for (i=0; i<n; i++) {
            omp_init_lock(&vLock[i]);
        }
#pragma omp barrier
#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "Lock initialization time: %lf seconds\n",
                elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif
#pragma omp for
#endif
        /* Pass 1: count per-vertex out-degrees; pos[i] records edge i's
         * rank among its source vertex's edges (lock-protected). */
        for (i=0; i<m; i++) {
            u = SDGdata->startVertex[i];
#ifdef _OPENMP
            omp_set_lock(&vLock[u]);
#endif
            pos[i] = degree[u]++;
#ifdef _OPENMP
            omp_unset_lock(&vLock[u]);
#endif
        }
#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "Degree computation time: %lf seconds\n",
                elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for schedule(static, chunkSize)
        for (i=0; i<n; i++) {
            omp_destroy_lock(&vLock[i]);
        }
        if (tid == 0)
            free(vLock);
#endif
#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "Lock destruction time: %lf seconds\n",
                elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif
        /* NOTE(review): unlike the earlier allocations, numEdges and
         * pSums are not assert-checked. */
        if (tid == 0) {
            numEdges = (LONG_T *) malloc((n+1)*sizeof(LONG_T));
            pSums = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
        }
#ifdef _OPENMP
#pragma omp barrier
#endif
        /* Pass 2: exclusive prefix sum of degrees -> CSR row offsets. */
        prefix_sums(degree, numEdges, pSums, n);
#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "Prefix sums time: %lf seconds\n",
                elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif
#ifdef _OPENMP
#pragma omp barrier
#endif
        if (tid == 0) {
            free(degree);
            free(pSums);
            w = (WEIGHT_T *) malloc(m*sizeof(WEIGHT_T));
            endV = (VERT_T *) malloc(m* sizeof(VERT_T));
        }
        mcsim_skip_instrs_end();
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
        /* Pass 3: scatter each edge into its CSR slot
         * j = numEdges[u] + pos[i].  The PERSISTENT blocks emulate
         * undo/redo logging for persistent-memory studies. */
        for (i=0; i<m; i++) {
#ifdef PERSISTENT
            mcsim_skip_instrs_begin();
            LONG_T undolog_u, redolog_u, undolog_j, redolog_j;
            VERT_T *undolog_endV, *redolog_endV;
            WEIGHT_T* undolog_w, *redolog_w;
            undolog_w = (WEIGHT_T *) malloc(m*sizeof(WEIGHT_T));
            redolog_w = (WEIGHT_T *) malloc(m*sizeof(WEIGHT_T));
            undolog_endV = (VERT_T *) malloc(m* sizeof(VERT_T));
            redolog_endV = (VERT_T *) malloc(m* sizeof(VERT_T));
            mcsim_skip_instrs_end();
            mcsim_log_begin();
            //mcsim_skip_instrs_begin();
#ifdef UNDOLOG
            /* NOTE(review): `u` and `j` are read here before this
             * iteration assigns them below (first iteration reads
             * stale/uninitialized values), and endV[j-1]/w[j-1] look
             * like they intend endV[j]/w[j].  Looks like a bug in the
             * logging instrumentation -- confirm intended semantics. */
            undolog_u = u;
            undolog_j = j;
            undolog_endV[j] = endV[j-1];
            undolog_w[j] = w[j-1];
#endif // UNDOLOG
#ifdef REDOLOG
            redolog_u = SDGdata->startVertex[i];
            redolog_j = numEdges[u] + pos[i];
            redolog_endV[j] = SDGdata->endVertex[i];
            redolog_w[j] = SDGdata->weight[i];
#endif // REDOLOG
            //mcsim_skip_instrs_end();
            mcsim_mem_fence();
            mcsim_log_end();
            mcsim_mem_fence();
#endif // PERSISTENT
            u = SDGdata->startVertex[i];
            j = numEdges[u] + pos[i];
            endV[j] = SDGdata->endVertex[i];
            w[j] = SDGdata->weight[i];
            // make sure undolog and redolog data structures are not discarded by compiler
#ifdef PERSISTENT
            mcsim_skip_instrs_begin();
            printf("%d\n", (int)((sizeof undolog_u) + (sizeof undolog_j) +
                (sizeof undolog_endV) + (sizeof undolog_w) + (sizeof redolog_u) +
                (sizeof redolog_j) + (sizeof redolog_endV) + (sizeof redolog_w)));
            mcsim_skip_instrs_end();
#endif // PERSISTENT
        }
#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "Edge data structure construction time: %lf seconds\n",
                elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif
        /* Publish the finished arrays into G (thread 0 only). */
        if (tid == 0) {
            free(pos);
#ifdef PERSISTENT
            mcsim_skip_instrs_begin();
            graph *undolog_G, *redolog_G;
            /* NOTE(review): each log graph is malloc'd twice in a row;
             * the first allocation leaks.  Presumably intentional noise
             * for the simulator, but worth confirming. */
#ifdef UNDOLOG
            undolog_G = (graph *) malloc(sizeof(graph));
            undolog_G = (graph *) malloc(sizeof(graph));
#endif // UNDOLOG
#ifdef REDOLOG
            redolog_G = (graph *) malloc(sizeof(graph));
            redolog_G = (graph *) malloc(sizeof(graph));
#endif // REDOLOG
            mcsim_skip_instrs_end();
            mcsim_log_begin();
            //mcsim_skip_instrs_begin();
#ifdef UNDOLOG
            undolog_G->n = G->n;
            undolog_G->m = G->m;
            undolog_G->numEdges = G->numEdges;
            undolog_G->endV = G->endV;
            undolog_G->weight = G->weight;
#endif // UNDOLOG
#ifdef REDOLOG
            redolog_G->n = n;
            redolog_G->m = m;
            redolog_G->numEdges = numEdges;
            redolog_G->endV = endV;
            redolog_G->weight = w;
#endif // REDOLOG
            //mcsim_skip_instrs_end();
            mcsim_mem_fence();
            mcsim_log_end();
            mcsim_mem_fence();
#endif // PERSISTENT
            G->n = n;
            G->m = m;
            G->numEdges = numEdges;
            G->endV = endV;
            G->weight = w;
            // make sure undolog and redolog data structures are not discarded by compiler
#ifdef PERSISTENT
            mcsim_skip_instrs_begin();
            printf("%d\n", (int)((sizeof undolog_G) + (sizeof redolog_G)));
            mcsim_skip_instrs_end();
#endif // PERSISTENT
        }
#ifdef _OPENMP
    }
#endif
    mcsim_skip_instrs_begin();
    /* Verification */
#if 0
    fprintf(stderr, "SDG data:\n");
    for (int i=0; i<SDGdata->m; i++) {
        fprintf(stderr, "[%ld %ld %ld] ", SDGdata->startVertex[i],
            SDGdata->endVertex[i], SDGdata->weight[i]);
    }
    fprintf(stderr, "\n");
    for (int i=0; i<G->n + 1; i++) {
        fprintf(stderr, "[%ld] ", G->numEdges[i]);
    }
    fprintf(stderr, "\nGraph:\n");
    for (int i=0; i<G->n; i++) {
        for (int j=G->numEdges[i]; j<G->numEdges[i+1]; j++) {
            fprintf(stderr, "[%ld %ld %ld] ", i, G->endV[j], G->weight[j]);
        }
    }
#endif
    /* The raw edge list is no longer needed once the CSR copy exists. */
    free(SDGdata->startVertex);
    free(SDGdata->endVertex);
    free(SDGdata->weight);
    elapsed_time = get_seconds() - elapsed_time;
    mcsim_skip_instrs_end();
    return elapsed_time;
}
|
axpy_ompacc.c | // Experimental test input for Accelerator directives
// Liao 1/15/2013
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
/* change this to do saxpy or daxpy : single precision or double precision*/
#define REAL double
#define VEC_LEN 1024000 //use a fixed number for now
/* zero out the entire vector */
/* Fill the n-element vector A with zeros. */
void zero(REAL *A, int n)
{
    int k = 0;
    while (k < n) {
        A[k] = 0.0;
        ++k;
    }
}
/* initialize a vector with random floating point numbers */
/* Populate the n-element vector A with uniform [0,1) values drawn
   sequentially from drand48() (seed via srand48 beforehand). */
void init(REAL *A, int n)
{
    int k = 0;
    while (k < n) {
        A[k] = (double)drand48();
        ++k;
    }
}
/* y := a*x + y over n elements, offloaded with the experimental OpenMP
   accelerator directives.  The target/map pragma names the device and
   the data environment (y is copied in and out, x/a/n are copied in);
   the runtime picks the matching generated code version for the
   device. */
void axpy_ompacc(REAL* x, REAL* y, int n, REAL a) {
  int idx;
#pragma omp target device (gpu0) map(inout: y[0:n]) map(in: x[0:n],a,n)
#pragma omp parallel for shared(x, y, n, a) private(idx)
  for (idx = 0; idx < n; idx++) {
    y[idx] = y[idx] + a * x[idx];
  }
}
int main(int argc, char *argv[])
{
int n;
REAL *y_ompacc, *x;
REAL a = 123.456;
n = VEC_LEN;
y_ompacc = (REAL *) malloc(n * sizeof(REAL));
x = (REAL *) malloc(n * sizeof(REAL));
srand48(1<<12);
init(x, n);
init(y_ompacc, n);
/* openmp acc version */
axpy_ompacc(x, y_ompacc, n, a);
free(y_ompacc);
free(x);
return 0;
}
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% John Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/compare.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/statistic.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImageChannels() compares one or more image channels of an image
% to a reconstructed image and returns the difference image.
%
% The format of the CompareImageChannels method is:
%
% Image *CompareImageChannels(const Image *image,
% const Image *reconstruct_image,const ChannelType channel,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o channel: the channel.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: compare across the default composite channel
    set and return the highlighted difference image.
  */
  return(CompareImageChannels(image,reconstruct_image,CompositeChannels,
    metric,distortion,exception));
}
MagickExport Image *CompareImageChannels(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  Image
    *difference_image,
    *highlight_image;

  MagickBooleanType
    status;

  MagickPixelPacket
    highlight,
    lowlight,
    zero;

  ssize_t
    y;

  /*
    Validate arguments and require matching geometry.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    ThrowImageException(ImageError,"ImageSizeDiffers");
  /*
    Compute the requested distortion metric first; bail out (returning
    NULL, distortion already set) if that fails.
  */
  status=GetImageChannelDistortion(image,reconstruct_image,channel,metric,
    distortion,exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  /*
    Build the two working images: difference_image (the returned clone)
    and highlight_image (per-pixel match/mismatch colors, composited
    onto the difference image at the end).
  */
  difference_image=CloneImage(image,0,0,MagickTrue,exception);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel);
  highlight_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(highlight_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&highlight_image->exception);
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel);
  /*
    Mismatched pixels default to #f1001ecc, matching pixels to
    #ffffffcc; both are overridable via image artifacts.
  */
  (void) QueryMagickColor("#f1001ecc",&highlight,exception);
  artifact=GetImageArtifact(image,"highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryMagickColor(artifact,&highlight,exception);
  (void) QueryMagickColor("#ffffffcc",&lowlight,exception);
  artifact=GetImageArtifact(image,"lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryMagickColor(artifact,&lowlight,exception);
  if (highlight_image->colorspace == CMYKColorspace)
    {
      ConvertRGBToCMYK(&highlight);
      ConvertRGBToCMYK(&lowlight);
    }
  /*
    Generate difference image.
  */
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,highlight_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      pixel,
      reconstruct_pixel;

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register IndexPacket
      *restrict highlight_indexes;

    register PixelPacket
      *restrict r;

    register ssize_t
      x;

    /* A failure on any row cancels the remaining rows. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns,
      1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,highlight_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) ||
        (q == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    highlight_indexes=GetCacheViewAuthenticIndexQueue(highlight_view);
    pixel=zero;
    reconstruct_pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickStatusType
        difference;

      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x,
        &reconstruct_pixel);
      /*
        Composite comparison uses fuzz-aware color similarity; per-channel
        comparison is exact per selected channel.
      */
      difference=MagickFalse;
      if (channel == CompositeChannels)
        {
          if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse)
            difference=MagickTrue;
        }
      else
        {
          if (((channel & RedChannel) != 0) &&
              (GetPixelRed(p) != GetPixelRed(q)))
            difference=MagickTrue;
          if (((channel & GreenChannel) != 0) &&
              (GetPixelGreen(p) != GetPixelGreen(q)))
            difference=MagickTrue;
          if (((channel & BlueChannel) != 0) &&
              (GetPixelBlue(p) != GetPixelBlue(q)))
            difference=MagickTrue;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse) &&
              (GetPixelOpacity(p) != GetPixelOpacity(q)))
            difference=MagickTrue;
          if ((((channel & IndexChannel) != 0) &&
               (image->colorspace == CMYKColorspace) &&
               (reconstruct_image->colorspace == CMYKColorspace)) &&
              (GetPixelIndex(indexes+x) !=
               GetPixelIndex(reconstruct_indexes+x)))
            difference=MagickTrue;
        }
      if (difference != MagickFalse)
        SetPixelPacket(highlight_image,&highlight,r,highlight_indexes+x);
      else
        SetPixelPacket(highlight_image,&lowlight,r,highlight_indexes+x);
      p++;
      q++;
      r++;
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Blend the highlight colors onto the clone and return it (NULL on
    any row failure above).
  */
  (void) CompositeImage(difference_image,image->compose,highlight_image,0,0);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDistortion() compares one or more image channels of an image
% to a reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageChannelDistortion method is:
%
% MagickBooleanType GetImageChannelDistortion(const Image *image,
% const Image *reconstruct_image,const ChannelType channel,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o channel: the channel.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: measure distortion across the default
    composite channel set.
  */
  return(GetImageChannelDistortion(image,reconstruct_image,CompositeChannels,
    metric,distortion,exception));
}
/*
  Absolute-error metric: count, per channel, the pixels whose colors are
  not fuzz-similar between image and reconstruct_image, accumulating
  into distortion[0..CompositeChannels].  distortion must be
  zero-initialized by the caller; counts are raw (not normalized here).
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    MagickPixelPacket
      pixel,
      reconstruct_pixel;

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    pixel=zero;
    reconstruct_pixel=pixel;
    /* Per-row tallies, merged under the critical section below. */
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x,
        &reconstruct_pixel);
      /* One mismatching pixel bumps every selected channel's count. */
      if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse)
        {
          if ((channel & RedChannel) != 0)
            channel_distortion[RedChannel]++;
          if ((channel & GreenChannel) != 0)
            channel_distortion[GreenChannel]++;
          if ((channel & BlueChannel) != 0)
            channel_distortion[BlueChannel]++;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            channel_distortion[OpacityChannel]++;
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            channel_distortion[BlackChannel]++;
          channel_distortion[CompositeChannels]++;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static size_t GetNumberChannels(const Image *image,
const ChannelType channel)
{
size_t
channels;
channels=0;
if ((channel & RedChannel) != 0)
channels++;
if ((channel & GreenChannel) != 0)
channels++;
if ((channel & BlueChannel) != 0)
channels++;
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
channels++;
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
channels++;
return(channels);
}
/*
  Fuzz metric: accumulate the per-channel mean of squared
  quantum-scaled differences into distortion[], then normalize by the
  pixel count and channel count and take the square root of the
  composite entry (an RMS value).  distortion must be zero-initialized
  by the caller.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    /* Per-row sums of squared differences, merged below. */
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(GetPixelRed(p)-(MagickRealType)
            GetPixelRed(q));
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(GetPixelGreen(p)-(MagickRealType)
            GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(GetPixelBlue(p)-(MagickRealType)
            GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /* Opacity participates if either image has a matte channel; a
         matte-less side contributes the opaque constant. */
      if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) ||
          (reconstruct_image->matte != MagickFalse)))
        {
          distance=QuantumScale*((image->matte != MagickFalse ?
            GetPixelOpacity(p) : OpaqueOpacity)-
            (reconstruct_image->matte != MagickFalse ?
            GetPixelOpacity(q): OpaqueOpacity));
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(GetPixelIndex(indexes+x)-
            (MagickRealType) GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Normalize to per-pixel means; the composite entry divides by the
    channel count (minus one when opacity was folded in above) and is
    reported as an RMS value.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) image->columns*image->rows);
  if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) ||
      (reconstruct_image->matte != MagickFalse)))
    distortion[CompositeChannels]/=(double) (GetNumberChannels(image,channel)-1);
  else
    distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]);
  return(status);
}
/*
  Mean-absolute-error metric: accumulate the per-channel mean of
  quantum-scaled absolute differences into distortion[], normalized by
  pixel count, with the composite entry further divided by the channel
  count.  distortion must be zero-initialized by the caller.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    /* Per-row sums of absolute differences, merged below. */
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*fabs(GetPixelRed(p)-(double)
            GetPixelRed(q));
          channel_distortion[RedChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*fabs(GetPixelGreen(p)-(double)
            GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs(GetPixelBlue(p)-(double)
            GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*fabs(GetPixelOpacity(p)-(double)
            GetPixelOpacity(q));
          channel_distortion[OpacityChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs(GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Normalize to per-pixel means; the composite entry averages over the
    selected channels as well.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) image->columns*image->rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  /*
    Compute the mean-error-per-pixel distortion between image and
    reconstruct_image for the requested channels.  Per-channel absolute
    differences accumulate into distortion[], and the aggregate statistics
    (mean error per pixel, normalized mean error, normalized maximum error)
    are stored in image->error.  Returns MagickFalse if a pixel row cannot
    be read from either image.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  MagickRealType
    alpha,          /* alpha weight of the source pixel (1.0 without matte) */
    area,           /* count of channel samples accumulated */
    beta,           /* alpha weight of the reconstruct pixel */
    maximum_error,
    mean_error;

  ssize_t
    y;

  status=MagickTrue;
  alpha=1.0;
  beta=1.0;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        break;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      /*
        When the opacity channel is requested, weight the color channels by
        each image's alpha; only images with a matte contribute a weight
        (alpha/beta stay 1.0 otherwise).
      */
      if ((channel & OpacityChannel) != 0)
        {
          if (image->matte != MagickFalse)
            alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          if (reconstruct_image->matte != MagickFalse)
            beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
        }
      if ((channel & RedChannel) != 0)
        {
          distance=fabs(alpha*GetPixelRed(p)-beta*GetPixelRed(q));
          distortion[RedChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=fabs(alpha*GetPixelGreen(p)-beta*GetPixelGreen(q));
          distortion[GreenChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=fabs(alpha*GetPixelBlue(p)-beta*GetPixelBlue(q));
          distortion[BlueChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      /*
        Note: the opacity channel itself is compared unweighted (no
        alpha/beta factors), unlike the color channels above.
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=fabs((double) GetPixelOpacity(p)-GetPixelOpacity(q));
          distortion[OpacityChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      /*
        CMYK black (index) channel accumulates into the BlackChannel slot.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs(alpha*GetPixelIndex(indexes+x)-beta*
            GetPixelIndex(reconstruct_indexes+x));
          distortion[BlackChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    NOTE(review): area is 0 when no samples were accumulated (e.g. the very
    first row read fails, or no channel was selected), in which case these
    divisions produce NaN/Inf -- confirm callers never hit that case.
  */
  image->error.mean_error_per_pixel=distortion[CompositeChannels]/area;
  image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  /*
    Compute the per-channel mean squared error (normalized to [0,1] via
    QuantumScale) between image and reconstruct_image.  Rows are processed
    in parallel; each row accumulates into a private channel_distortion[]
    that is merged into the shared distortion[] inside an OpenMP critical
    section.  After the loop, totals are divided by the pixel count, and
    the composite entry additionally by the number of selected channels.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* per-row partial sums */

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;  /* another thread already failed; skip remaining rows */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(GetPixelRed(p)-(MagickRealType)
            GetPixelRed(q));
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(GetPixelGreen(p)-(MagickRealType)
            GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(GetPixelBlue(p)-(MagickRealType)
            GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*(GetPixelOpacity(p)-(MagickRealType)
            GetPixelOpacity(q));
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(GetPixelIndex(indexes+x)-
            (MagickRealType) GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Normalize: divide by total pixel count; the composite entry is also
    averaged over the number of channels selected.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) image->columns*image->rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  /*
    Compute the normalized cross correlation (NCC) between image and
    reconstruct_image for the requested channels.  Pixel values are centered
    on each image's per-channel mean, the covariance is accumulated, and the
    result is divided by the product of the standard deviations.  The
    composite entry is the RMS of the per-channel correlations.
    Returns MagickFalse on a pixel read failure, a cancelled progress
    monitor, or a statistics allocation failure.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    area;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageChannelStatistics(image,exception);
  reconstruct_statistics=GetImageChannelStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /*
        Robustness fix: statistics allocation can fail; release whichever
        half succeeded and fail cleanly instead of dereferencing NULL below.
      */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]=0.0;
  area=1.0/((MagickRealType) image->columns*image->rows-1);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Accumulate the covariance of each selected channel, centering each
        sample on its image's channel mean.
      */
      if ((channel & RedChannel) != 0)
        distortion[RedChannel]+=area*QuantumScale*(GetPixelRed(p)-
          image_statistics[RedChannel].mean)*(GetPixelRed(q)-
          reconstruct_statistics[RedChannel].mean);
      if ((channel & GreenChannel) != 0)
        distortion[GreenChannel]+=area*QuantumScale*(GetPixelGreen(p)-
          image_statistics[GreenChannel].mean)*(GetPixelGreen(q)-
          reconstruct_statistics[GreenChannel].mean);
      if ((channel & BlueChannel) != 0)
        distortion[BlueChannel]+=area*QuantumScale*(GetPixelBlue(p)-
          image_statistics[BlueChannel].mean)*(GetPixelBlue(q)-
          reconstruct_statistics[BlueChannel].mean);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        distortion[OpacityChannel]+=area*QuantumScale*(
          GetPixelOpacity(p)-image_statistics[OpacityChannel].mean)*
          (GetPixelOpacity(q)-
          reconstruct_statistics[OpacityChannel].mean);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        /*
          Bug fix: center the black (index) channel on its own statistics;
          the original subtracted the OpacityChannel mean here, skewing the
          CMYK correlation whenever the alpha and black means differ.
        */
        distortion[BlackChannel]+=area*QuantumScale*(
          GetPixelIndex(indexes+x)-
          image_statistics[BlackChannel].mean)*(
          GetPixelIndex(reconstruct_indexes+x)-
          reconstruct_statistics[BlackChannel].mean);
      p++;
      q++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    double
      gamma;

    gamma=image_statistics[i].standard_deviation*
      reconstruct_statistics[i].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);  /* avoid division by ~zero */
    distortion[i]=QuantumRange*gamma*distortion[i];
  }
  /*
    Composite distortion: RMS of the per-channel correlations.
  */
  distortion[CompositeChannels]=0.0;
  if ((channel & RedChannel) != 0)
    distortion[CompositeChannels]+=distortion[RedChannel]*
      distortion[RedChannel];
  if ((channel & GreenChannel) != 0)
    distortion[CompositeChannels]+=distortion[GreenChannel]*
      distortion[GreenChannel];
  if ((channel & BlueChannel) != 0)
    distortion[CompositeChannels]+=distortion[BlueChannel]*
      distortion[BlueChannel];
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[CompositeChannels]+=distortion[OpacityChannel]*
      distortion[OpacityChannel];
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    distortion[CompositeChannels]+=distortion[BlackChannel]*
      distortion[BlackChannel];
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]/
    GetNumberChannels(image,channel));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  /*
    Compute the peak absolute error: the maximum (QuantumScale-normalized)
    per-channel difference over all pixels.  Rows are processed in parallel;
    each row tracks its own maxima in channel_distortion[], which is folded
    into the shared distortion[] inside an OpenMP critical section.
    distortion[] entries are only ever raised, never reset, so the caller
    must pass a zeroed array.  Returns MagickFalse on a pixel read failure.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* per-row maxima */

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;  /* another thread already failed; skip remaining rows */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*fabs(GetPixelRed(p)-(double)
            GetPixelRed(q));
          if (distance > channel_distortion[RedChannel])
            channel_distortion[RedChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*fabs(GetPixelGreen(p)-(double)
            GetPixelGreen(q));
          if (distance > channel_distortion[GreenChannel])
            channel_distortion[GreenChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs(GetPixelBlue(p)-(double)
            GetPixelBlue(q));
          if (distance > channel_distortion[BlueChannel])
            channel_distortion[BlueChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*fabs(GetPixelOpacity(p)-(double)
            GetPixelOpacity(q));
          if (distance > channel_distortion[OpacityChannel])
            channel_distortion[OpacityChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs(GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reconstruct_indexes+x));
          if (distance > channel_distortion[BlackChannel])
            channel_distortion[BlackChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      if (channel_distortion[i] > distortion[i])
        distortion[i]=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  Convert a normalized mean squared error into a peak signal-to-noise ratio
  expressed in decibels.
*/
static inline double MeanSquaredToPSNR(const double mean_squared_error)
{
  return(20.0*log10((double) 1.0/sqrt(mean_squared_error)));
}

static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  /*
    Compute the PSNR for each requested channel: first obtain the mean
    squared distortion, then convert each selected entry (and the composite
    entry) to decibels in place.
  */
  MagickBooleanType
    status;

  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  if ((channel & RedChannel) != 0)
    distortion[RedChannel]=MeanSquaredToPSNR(distortion[RedChannel]);
  if ((channel & GreenChannel) != 0)
    distortion[GreenChannel]=MeanSquaredToPSNR(distortion[GreenChannel]);
  if ((channel & BlueChannel) != 0)
    distortion[BlueChannel]=MeanSquaredToPSNR(distortion[BlueChannel]);
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    distortion[OpacityChannel]=MeanSquaredToPSNR(distortion[OpacityChannel]);
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=MeanSquaredToPSNR(distortion[BlackChannel]);
  distortion[CompositeChannels]=MeanSquaredToPSNR(
    distortion[CompositeChannels]);
  return(status);
}
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  /*
    Root-mean-squared error: compute the mean squared distortion, then take
    the square root of every entry that applies to the requested channels.
    The per-entry updates are independent, so their order is immaterial.
  */
  MagickBooleanType
    status;

  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]);
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=sqrt(distortion[BlackChannel]);
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    distortion[OpacityChannel]=sqrt(distortion[OpacityChannel]);
  if ((channel & BlueChannel) != 0)
    distortion[BlueChannel]=sqrt(distortion[BlueChannel]);
  if ((channel & GreenChannel) != 0)
    distortion[GreenChannel]=sqrt(distortion[GreenChannel]);
  if ((channel & RedChannel) != 0)
    distortion[RedChannel]=sqrt(distortion[RedChannel]);
  return(status);
}
MagickExport MagickBooleanType GetImageChannelDistortion(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  /*
    Compare the selected channels of image to reconstruct_image with the
    given metric and return the composite distortion in *distortion.
    The images must have identical dimensions.  Returns MagickFalse (with
    *distortion set to 0.0) on failure.
    Fix: the original logged the trace event twice when image->debug was
    set; the redundant second LogMagickEvent call has been removed.
  */
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      /* NCC is the fallback for unrecognized metrics as well. */
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[CompositeChannels];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDistortions() compares the image channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageChannelDistortions method is:
%
% double *GetImageChannelDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport double *GetImageChannelDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  /*
    Compare every channel of image to reconstruct_image with the given
    metric and return a newly allocated array of per-channel distortions
    (indexed by ChannelType, with the composite value at CompositeChannels).
    Returns NULL on failure; the caller must release the array with
    RelinquishMagickMemory().
    Fix: the original logged the trace event twice when image->debug was
    set; the redundant second LogMagickEvent call has been removed.
  */
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageError,"ImageSizeDiffers","`%s'",image->filename);
      return((double *) NULL);
    }
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      /* NCC is the fallback for unrecognized metrics as well. */
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() measures the difference between colors at each pixel
% location of two images.  It returns MagickTrue only when every pixel of
% the two images matches exactly.  In addition, an error measure is
% computed by summing over all pixels in an image the distance squared in
% RGB space between each image pixel and its corresponding pixel in the
% reconstruct image.  The error measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(Image *image,
% const Image *reconstruct_image)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
*/
MagickExport MagickBooleanType IsImagesEqual(Image *image,
  const Image *reconstruct_image)
{
  /*
    Compare image to reconstruct_image pixel by pixel, accumulating the
    absolute per-channel differences.  The resulting statistics are stored
    in image->error; MagickTrue is returned only when the mean error per
    pixel is exactly zero (i.e. the images match).  Throws if the image
    dimensions differ.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    area,                 /* number of channel samples accumulated */
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    /*
      NOTE(review): a failed row read silently stops the scan without
      reporting an error -- with area possibly 0, the divisions below then
      produce NaN; confirm this cannot happen for callers.
    */
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      /* Red, green, and blue are always compared. */
      distance=fabs(GetPixelRed(p)-(double) GetPixelRed(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs(GetPixelGreen(p)-(double) GetPixelGreen(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs(GetPixelBlue(p)-(double) GetPixelBlue(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      /* Opacity participates only when the source image has a matte. */
      if (image->matte != MagickFalse)
        {
          distance=fabs(GetPixelOpacity(p)-(double) GetPixelOpacity(q));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      /* The black (index) channel participates only for CMYK-vs-CMYK. */
      if ((image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs(GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reconstruct_indexes+x));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  /*
    Measure how closely the reference matches the window of image anchored
    at (x_offset,y_offset): crop that window to a temporary image, compute
    the distortion against the reference, then release the crop.
    NOTE(review): a failed crop yields 0.0, which reads as a perfect match
    to callers searching for the minimum -- confirm this is intended.
  */
  double
    distortion;

  Image
    *candidate;

  MagickBooleanType
    status;

  RectangleInfo
    region;

  SetGeometry(reference,&region);
  region.x=x_offset;
  region.y=y_offset;
  candidate=CropImage(image,&region,exception);
  if (candidate == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(candidate,reference,metric,&distortion,exception);
  (void) status;  /* distortion is meaningful even when status is false */
  candidate=DestroyImage(candidate);
  return(distortion);
}
MagickExport Image *SimilarityImage(Image *image,const Image *reference,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: search for the reference within image using the
    default root-mean-squared-error metric.
  */
  return(SimilarityMetricImage(image,reference,RootMeanSquaredErrorMetric,
    offset,similarity_metric,exception));
}
MagickExport Image *SimilarityMetricImage(Image *image,const Image *reference,
  const MetricType metric,RectangleInfo *offset,double *similarity_metric,
  ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"

  /*
    Slide the reference image over every possible offset within image,
    scoring each position with GetSimilarityMetric().  The best (lowest)
    score and its offset are returned through *similarity_metric and
    *offset, and a map image is returned whose intensity encodes the
    similarity at each offset (white = exact match).
  */
  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=1.0;  /* worst-case score; lowered as matches are found */
  if ((reference->columns > image->columns) || (reference->rows > image->rows))
    ThrowImageException(ImageError,"ImageSizeDiffers");
  /*
    The map has one pixel per candidate offset.
  */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(similarity_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&similarity_image->exception);
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  /*
    NOTE(review): the view is acquired as a *virtual* cache view but written
    through GetCacheViewAuthenticPixels below -- confirm an authentic cache
    view is not required here.
  */
  similarity_view=AcquireVirtualCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;  /* another thread already failed; skip remaining rows */
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /*
        Track the global best match; the critical section guards the shared
        minimum and offset against concurrent updates.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          *similarity_metric=similarity;
          offset->x=x;
          offset->y=y;
        }
      /* Encode the score as a gray level: white = perfect match. */
      SetPixelRed(q,ClampToQuantum(QuantumRange-QuantumRange*similarity));
      SetPixelGreen(q,GetPixelRed(q));
      SetPixelBlue(q,GetPixelRed(q));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
sgemm.c | #include "blas.h"
#include "error.h"
#include <stdio.h>
#include "handle.h"
#include "config.h"
#include "sgemm.fatbin.c"
/* Return the smaller of the two unsigned sizes a and b. */
static inline size_t min(size_t a, size_t b) {
  if (b < a)
    return b;
  return a;
}
/* Return the larger of the two unsigned sizes a and b. */
static inline size_t max(size_t a, size_t b) {
  if (b > a)
    return b;
  return a;
}
/* Asynchronously copy an m x n sub-matrix from host matrix B (leading
 * dimension ldb, starting at element (bi,bj)) into device matrix A (leading
 * dimension lda, starting at element (ai,aj)) on the given stream.
 * Matrices are column-major; leading dimensions and row offsets are scaled
 * by elemSize to obtain byte pitches/offsets.
 * Returns the CUresult of the underlying cuMemcpy2DAsync call.
 *
 * Fix: the argument of cuMemcpy2DAsync had been corrupted from "&copy" into
 * the copyright glyph by an HTML-entity round trip; restored the address-of
 * expression. */
static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
                                           const void * B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  /* Positional initializer follows the CUDA_MEMCPY2D field order:
   * srcXInBytes, srcY, srcMemoryType, srcHost, srcDevice, srcArray, srcPitch,
   * dstXInBytes, dstY, dstMemoryType, dstHost, dstDevice, dstArray, dstPitch,
   * WidthInBytes, Height. */
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}
/* Asynchronously copy an m x n sub-matrix from device matrix B (leading
 * dimension ldb, starting at element (bi,bj)) into host matrix A (leading
 * dimension lda, starting at element (ai,aj)) on the given stream.  This is
 * the device-to-host mirror of cuMemcpyHtoD2DAsync above.
 * Returns the CUresult of the underlying cuMemcpy2DAsync call.
 *
 * Fix: the argument of cuMemcpy2DAsync had been corrupted from "&copy" into
 * the copyright glyph by an HTML-entity round trip; restored the address-of
 * expression. */
static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj,
                                           CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  /* Positional initializer follows the CUDA_MEMCPY2D field order (see the
   * host-to-device variant above). */
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}
/* Scalar constants used to detect the alpha/beta special cases below
 * (compared with == against caller-supplied values, as in reference BLAS). */
static const float zero = 0.0f;
static const float one = 1.0f;
/* sgemm: reference CPU implementation of single-precision GEMM.
 *
 * Computes C := alpha * op(A) * op(B) + beta * C, where op(X) is X or X^T
 * according to transA/transB; C is m x n, op(A) is m x k and op(B) is k x n.
 * All matrices are column-major with leading dimensions lda/ldb/ldc.
 *
 * Invalid leading dimensions are reported through XERBLA with the 1-based
 * parameter index (8, 10 or 13) and the call returns without touching C.
 * Quick-return and alpha == 0 special cases mirror the netlib reference.
 * Columns of C are independent, so the outer j-loops are OpenMP-parallel. */
void sgemm(CBlasTranspose transA, CBlasTranspose transB,
           size_t m, size_t n, size_t k,
           float alpha, const float * restrict A, size_t lda, const float * restrict B, size_t ldb,
           float beta, float * restrict C, size_t ldc) {
  /* Row counts of A and B as stored (not of op(A)/op(B)). */
  const size_t nRowA = (transA == CBlasNoTrans) ? m : k;
  const size_t nRowB = (transB == CBlasNoTrans) ? k : n;

  /* Argument validation, netlib style. */
  int info = 0;
  if (lda < nRowA)
    info = 8;
  else if (ldb < nRowB)
    info = 10;
  else if (ldc < m)
    info = 13;
  if (info != 0) {
    XERBLA(info);
    return;
  }

  /* Quick return when the result is unchanged. */
  if (m == 0 || n == 0 || ((alpha == zero || k == 0) && beta == one))
    return;

  /* alpha == 0: C is only cleared or scaled by beta. */
  if (alpha == zero) {
    if (beta == zero) {
#pragma omp parallel for
      for (size_t j = 0; j < n; j++) {
        for (size_t i = 0; i < m; i++)
          C[j * ldc + i] = zero;
      }
    }
    else {
#pragma omp parallel for
      for (size_t j = 0; j < n; j++) {
        for (size_t i = 0; i < m; i++)
          C[j * ldc + i] *= beta;
      }
    }
    return;
  }

  if (transB == CBlasNoTrans) {
    if (transA == CBlasNoTrans) {
      /* C := alpha*A*B + beta*C: scale/clear the column of C first, then
       * accumulate SAXPY-style along the columns of A. */
#pragma omp parallel for
      for (size_t j = 0; j < n; j++) {
        if (beta == zero) {
          for (size_t i = 0; i < m; i++)
            C[j * ldc + i] = zero;
        }
        else if (beta != one) {
          for (size_t i = 0; i < m; i++)
            C[j * ldc + i] *= beta;
        }
        for (size_t l = 0; l < k; l++) {
          if (B[j * ldb + l] != zero) {    /* skip zero multipliers */
            register float temp = alpha * B[j * ldb + l];
            for (size_t i = 0; i < m; i++)
              C[j * ldc + i] += temp * A[l * lda + i];
          }
        }
      }
    }
    else {
      /* C := alpha*A^T*B + beta*C: dot product down the columns of A. */
#pragma omp parallel for
      for (size_t j = 0; j < n; j++) {
        for (size_t i = 0; i < m; i++) {
          register float temp = zero;
          for (size_t l = 0; l < k; l++)
            temp += A[i * lda + l] * B[j * ldb + l];
          if (beta == zero)
            C[j * ldc + i] = alpha * temp;
          else
            C[j * ldc + i] = alpha * temp + beta * C[j * ldc + i];
        }
      }
    }
  }
  else {
    if (transA == CBlasNoTrans) {
      /* C := alpha*A*B^T + beta*C: B is addressed row-wise (B[l*ldb + j]). */
#pragma omp parallel for
      for (size_t j = 0; j < n; j++) {
        if (beta == zero) {
          for (size_t i = 0; i < m; i++)
            C[j * ldc + i] = zero;
        }
        else if (beta != one) {
          for (size_t i = 0; i < m; i++)
            C[j * ldc + i] *= beta;
        }
        for (size_t l = 0; l < k; l++) {
          if (B[l * ldb + j] != zero) {
            register float temp = alpha * B[l * ldb + j];
            for (size_t i = 0; i < m; i++)
              C[j * ldc + i] += temp * A[l * lda + i];
          }
        }
      }
    }
    else {
      /* C := alpha*A^T*B^T + beta*C. */
#pragma omp parallel for
      for (size_t j = 0; j < n; j++) {
        for (size_t i = 0; i < m; i++) {
          register float temp = zero;
          for (size_t l = 0; l < k; l++)
            temp += A[i * lda + l] * B[l * ldb + j];
          if (beta == zero)
            C[j * ldc + i] = alpha * temp;
          else
            C[j * ldc + i] = alpha * temp + beta * C[j * ldc + i];
        }
      }
    }
  }
}
/* cuSgemm2: launch a single out-of-place SGEMM kernel computing
 *   D := alpha * op(A) * op(B) + beta * C
 * on the device (pass C == D for the in-place case; the quick-return below
 * only applies when C and D alias).
 *
 * Leading dimensions are validated as in the CPU sgemm (XERBLA index
 * 8/10/13, plus 15 for ldd) and CUDA_ERROR_INVALID_VALUE is returned.  The
 * fatbin module is loaded lazily into the handle on first use.  Block sizes
 * are keyed off transA only, matching the kernel instantiations compiled
 * into sgemm.fatbin.c. */
CUresult cuSgemm2(CUBLAShandle handle, CBlasTranspose transA, CBlasTranspose transB,
                  size_t m, size_t n, size_t k,
                  float alpha, CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb,
                  float beta, CUdeviceptr C, size_t ldc, CUdeviceptr D, size_t ldd,
                  CUstream stream) {
  const size_t nRowA = (transA == CBlasNoTrans) ? m : k;
  const size_t nRowB = (transB == CBlasNoTrans) ? k : n;
  int info = 0;
  if (lda < nRowA)
    info = 8;
  else if (ldb < nRowB)
    info = 10;
  else if (ldc < m)
    info = 13;
  else if (ldd < m)
    info = 15;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }
  /* Quick return: nothing to compute and D already holds the result. */
  if (m == 0 || n == 0 || (C == D && (alpha == zero || k == 0) && beta == one))
    return CUDA_SUCCESS;

  CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));

  /* Lazily load the embedded fatbin the first time this handle is used. */
  if (handle->sgemm2 == NULL)
    CU_ERROR_CHECK(cuModuleLoadData(&handle->sgemm2, imageBytes));

  /* Tile sizes the kernel was compiled with (selected by transA only). */
  const unsigned int mb = (transA == CBlasNoTrans) ? 64 : 32;
  const unsigned int nb = (transA == CBlasNoTrans) ? 16 : 32;
  const unsigned int kb = (transA == CBlasNoTrans) ? 16 : 8;
  const unsigned int bx = (transA == CBlasNoTrans) ? 16 : 8;
  const unsigned int by = (transA == CBlasNoTrans) ? 4 : 8;

  /* The kernel is a C++ template; locate it by its Itanium-mangled name,
   * which encodes the transpose flags and tile sizes as template arguments.
   * This string must match the instantiations in the fatbin exactly. */
  char name[85];
  snprintf(name, 85,
           "_Z6sgemm2IL14CBlasTranspose%dELS0_%dELj%uELj%uELj%uELj%uELj%uEEvPKfS2_S2_Pfffiiiiiii",
           transA, transB, mb, nb, kb, bx, by);

  CUfunction function;
  CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->sgemm2, name));

  /* Kernel parameters are passed by address, in the kernel's argument order. */
  void * params[] = { &A, &B, &C, &D, &alpha, &beta, &lda, &ldb, &ldc, &ldd, &m, &n, &k };

  /* One thread block per mb x nb tile of the output. */
  CU_ERROR_CHECK(cuLaunchKernel(function, (unsigned int)(m + mb - 1) / mb, (unsigned int)(n + nb - 1) / nb, 1,
                                bx, by, 1, 0, stream, params, NULL));

  CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));
  return CUDA_SUCCESS;
}
/* Argument bundle describing one host-side SGEMM tile for background_sgemm.
 * Presumably copied by value into each task (cuTaskCreate is handed
 * sizeof(struct sgemm_args)) -- confirm against the task API. */
struct sgemm_args {
  CUBLAShandle handle;              /* per-context BLAS handle for the task */
  const float * A, * B;             /* host operand tile origins */
  float * C;                        /* host destination tile */
  size_t m, n, k, lda, ldb, ldc;    /* tile dimensions and leading dims */
  float alpha, beta;                /* scalar multipliers */
  CBlasTranspose transA, transB;    /* op(A)/op(B) selectors */
};
/* background_sgemm: CUtask body that computes one tile of
 *   C := alpha * op(A) * op(B) + beta * C
 * on the GPU bound to the calling task's context.
 *
 * `a` points to a struct sgemm_args describing the host tiles.  A and B are
 * staged through two pairs of device buffers (A0/A1, B0/B1) so that the
 * upload of block l+1 (issued on the `copy` stream) overlaps the multiply of
 * block l (issued on the `compute` stream); after each block the streams and
 * buffer pointers are swapped.
 *
 * Fix: the first cuStreamCreate argument had been corrupted from "&copy"
 * into the copyright glyph by an HTML-entity round trip; restored the
 * address-of expression.
 *
 * NOTE(review): CU_ERROR_CHECK returns early on failure, so device buffers
 * and streams allocated before a failing call are leaked on error paths. */
static CUresult background_sgemm(const void * a) {
  struct sgemm_args * args = (struct sgemm_args *)a;
  CUBLAShandle handle = args->handle;

  // Block sizes (keyed off transA, matching the kernel instantiations)
  const size_t mb = (args->transA == CBlasNoTrans) ? SGEMM_N_MB : SGEMM_T_MB;
  const size_t nb = (args->transA == CBlasNoTrans) ? SGEMM_N_NB : SGEMM_T_NB;
  const size_t kb = (args->transA == CBlasNoTrans) ? SGEMM_N_KB : SGEMM_T_KB;

  // Temporary device memory and streams
  CUdeviceptr A0, A1, B0, B1, C;
  size_t lda, ldb, ldc;
  CUstream copy, compute;

  // Allocate two matrices for blocks of A and B on the device and one for a
  // block of C.  Pitched allocations return the pitch in bytes; it is
  // converted to a leading dimension in elements below.
  if (args->transA == CBlasNoTrans) {
    CU_ERROR_CHECK(cuMemAllocPitch(&A0, &lda, mb * sizeof(float), kb, sizeof(float)));
    CU_ERROR_CHECK(cuMemAllocPitch(&A1, &lda, mb * sizeof(float), kb, sizeof(float)));
  }
  else {
    CU_ERROR_CHECK(cuMemAllocPitch(&A0, &lda, kb * sizeof(float), mb, sizeof(float)));
    CU_ERROR_CHECK(cuMemAllocPitch(&A1, &lda, kb * sizeof(float), mb, sizeof(float)));
  }
  lda /= sizeof(float);
  if (args->transB == CBlasNoTrans) {
    CU_ERROR_CHECK(cuMemAllocPitch(&B0, &ldb, kb * sizeof(float), nb, sizeof(float)));
    CU_ERROR_CHECK(cuMemAllocPitch(&B1, &ldb, kb * sizeof(float), nb, sizeof(float)));
  }
  else {
    CU_ERROR_CHECK(cuMemAllocPitch(&B0, &ldb, nb * sizeof(float), kb, sizeof(float)));
    CU_ERROR_CHECK(cuMemAllocPitch(&B1, &ldb, nb * sizeof(float), kb, sizeof(float)));
  }
  ldb /= sizeof(float);
  CU_ERROR_CHECK(cuMemAllocPitch(&C, &ldc, mb * sizeof(float), nb, sizeof(float)));
  ldc /= sizeof(float);

  // Create streams
  CU_ERROR_CHECK(cuStreamCreate(&copy, CU_STREAM_NON_BLOCKING));
  CU_ERROR_CHECK(cuStreamCreate(&compute, CU_STREAM_NON_BLOCKING));

  // Copy C onto the device using the compute stream
  CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(C, ldc, 0, 0,
                                     args->C, args->ldc, 0, 0,
                                     args->m, args->n, sizeof(float), compute));

  // Perform C *= beta on the compute stream to ensure C has finished copying
  // (a k == 0 SGEMM reduces to the beta scaling).
  CU_ERROR_CHECK(cuSgemm(handle, CBlasNoTrans, CBlasNoTrans,
                         args->m, args->n, 0,
                         zero, 0, ldc, 0, 0,
                         args->beta, C, ldc, compute));

  // Can exit early if alpha * op(A) * op(B) will evaluate to zero
  if (args->alpha != zero && args->k > 0) {
    // Perform C += alpha * op(A) * op(B).  The four branches differ only in
    // the orientation (and therefore the copy extents/offsets) of A and B.
    if (args->transB == CBlasNoTrans) {
      if (args->transA == CBlasNoTrans) {
        // Copy A and B onto the device asynchronously on the same stream as C
        const size_t lb = min(args->k, kb);
        CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A0, lda, 0, 0,
                                           args->A, args->lda, 0, 0,
                                           args->m, lb, sizeof(float), compute));
        CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B0, ldb, 0, 0,
                                           args->B, args->ldb, 0, 0,
                                           lb, args->n, sizeof(float), compute));
        for (size_t l = 0; l < args->k; l += kb) {
          // Compute C on the same stream as the copies to ensure they have finished first
          CU_ERROR_CHECK(cuSgemm(handle, args->transA, args->transB,
                                 args->m, args->n, min(args->k - l, kb),
                                 args->alpha, A0, lda, B0, ldb,
                                 one, C, ldc, compute));
          // If there is more work to do
          if (l + kb < args->k) {
            const size_t lb = min(args->k - l - kb, kb);
            // Copy the next blocks of A and B on the opposite stream from the sgemm
            CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A1, lda, 0, 0,
                                               args->A, args->lda, 0, l + kb,
                                               args->m, lb, sizeof(float), copy));
            CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B1, ldb, 0, 0,
                                               args->B, args->ldb, l + kb, 0,
                                               lb, args->n, sizeof(float), copy));
            // Swap the streams and pointers so that the compute starts after the copy
            CUstream stream = compute; compute = copy; copy = stream;
            CUdeviceptr ptr = A0; A0 = A1; A1 = ptr;
            ptr = B0; B0 = B1; B1 = ptr;
          }
        }
      }
      else {
        // Copy A and B onto the device asynchronously on the same stream as C
        const size_t lb = min(args->k, kb);
        CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A0, lda, 0, 0,
                                           args->A, args->lda, 0, 0,
                                           lb, args->m, sizeof(float), compute));
        CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B0, ldb, 0, 0,
                                           args->B, args->ldb, 0, 0,
                                           lb, args->n, sizeof(float), compute));
        for (size_t l = 0; l < args->k; l += kb) {
          // Compute C on the same stream as the copies to ensure they have finished first
          CU_ERROR_CHECK(cuSgemm(handle, args->transA, args->transB,
                                 args->m, args->n, min(args->k - l, kb),
                                 args->alpha, A0, lda, B0, ldb,
                                 one, C, ldc, compute));
          // If there is more work to do
          if (l + kb < args->k) {
            const size_t lb = min(args->k - l - kb, kb);
            // Copy the next blocks of A and B on the opposite stream from the sgemm
            CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A1, lda, 0, 0,
                                               args->A, args->lda, l + kb, 0,
                                               lb, args->m, sizeof(float), copy));
            CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B1, ldb, 0, 0,
                                               args->B, args->ldb, l + kb, 0,
                                               lb, args->n, sizeof(float), copy));
            // Swap the streams and pointers so that the compute starts after the copy
            CUstream stream = compute; compute = copy; copy = stream;
            CUdeviceptr ptr = A0; A0 = A1; A1 = ptr;
            ptr = B0; B0 = B1; B1 = ptr;
          }
        }
      }
    }
    else {
      if (args->transA == CBlasNoTrans) {
        // Copy A and B onto the device asynchronously on the same stream as C
        const size_t lb = min(args->k, kb);
        CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A0, lda, 0, 0,
                                           args->A, args->lda, 0, 0,
                                           args->m, lb, sizeof(float), compute));
        CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B0, ldb, 0, 0,
                                           args->B, args->ldb, 0, 0,
                                           args->n, lb, sizeof(float), compute));
        for (size_t l = 0; l < args->k; l += kb) {
          // Compute C on the same stream as the copies to ensure they have finished first
          CU_ERROR_CHECK(cuSgemm(handle, args->transA, args->transB,
                                 args->m, args->n, min(args->k - l, kb),
                                 args->alpha, A0, lda, B0, ldb,
                                 one, C, ldc, compute));
          // If there is more work to do
          if (l + kb < args->k) {
            const size_t lb = min(args->k - l - kb, kb);
            // Copy the next blocks of A and B on the opposite stream from the sgemm
            CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A1, lda, 0, 0,
                                               args->A, args->lda, 0, l + kb,
                                               args->m, lb, sizeof(float), copy));
            CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B1, ldb, 0, 0,
                                               args->B, args->ldb, 0, l + kb,
                                               args->n, lb, sizeof(float), copy));
            // Swap the streams and pointers so that the compute starts after the copy
            CUstream stream = compute; compute = copy; copy = stream;
            CUdeviceptr ptr = A0; A0 = A1; A1 = ptr;
            ptr = B0; B0 = B1; B1 = ptr;
          }
        }
      }
      else {
        // Copy A and B onto the device asynchronously on the same stream as C
        const size_t lb = min(args->k, kb);
        CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A0, lda, 0, 0,
                                           args->A, args->lda, 0, 0,
                                           lb, args->m, sizeof(float), compute));
        CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B0, ldb, 0, 0,
                                           args->B, args->ldb, 0, 0,
                                           args->n, lb, sizeof(float), compute));
        for (size_t l = 0; l < args->k; l += kb) {
          // Compute C on the same stream as the copies to ensure they have finished first
          CU_ERROR_CHECK(cuSgemm(handle, args->transA, args->transB,
                                 args->m, args->n, min(args->k - l, kb),
                                 args->alpha, A0, lda, B0, ldb,
                                 one, C, ldc, compute));
          // If there is more work to do
          if (l + kb < args->k) {
            const size_t lb = min(args->k - l - kb, kb);
            // Copy the next blocks of A and B on the opposite stream from the sgemm
            CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A1, lda, 0, 0,
                                               args->A, args->lda, l + kb, 0,
                                               lb, args->m, sizeof(float), copy));
            CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B1, ldb, 0, 0,
                                               args->B, args->ldb, 0, l + kb,
                                               args->n, lb, sizeof(float), copy));
            // Swap the streams and pointers so that the compute starts after the copy
            CUstream stream = compute; compute = copy; copy = stream;
            CUdeviceptr ptr = A0; A0 = A1; A1 = ptr;
            ptr = B0; B0 = B1; B1 = ptr;
          }
        }
      }
    }
  }

  // Copy C back onto the host on the compute stream
  CU_ERROR_CHECK(cuMemcpyDtoH2DAsync(args->C, args->ldc, 0, 0, C, ldc, 0, 0,
                                     args->m, args->n, sizeof(float), compute));

  // Clean up temporary memory and streams
  CU_ERROR_CHECK(cuMemFree(A0));
  CU_ERROR_CHECK(cuMemFree(A1));
  CU_ERROR_CHECK(cuMemFree(B0));
  CU_ERROR_CHECK(cuMemFree(B1));
  CU_ERROR_CHECK(cuMemFree(C));
  CU_ERROR_CHECK(cuStreamDestroy(copy));
  CU_ERROR_CHECK(cuStreamDestroy(compute));
  return CUDA_SUCCESS;
}
/* cuMultiGPUSgemm: computes C := alpha*op(A)*op(B) + beta*C by tiling C into
 * mb x nb blocks and dispatching one background_sgemm task per tile,
 * round-robin over the available GPU contexts.
 *
 * Handled on the host: argument errors (XERBLA + CUDA_ERROR_INVALID_VALUE),
 * the alpha == 0 scale/clear case, and problems smaller than one tile
 * (delegated to the CPU sgemm()).
 *
 * NOTE(review): `tasks` is a VLA of ((m+mb-1)/mb)*((n+nb-1)/nb) handles,
 * which can be large for big matrices.  `args` is reused for every task and
 * appears to be copied by cuTaskCreate (its size is passed) -- confirm
 * against the task API.  The CUresult of the last destroyed task is the one
 * returned. */
CUresult cuMultiGPUSgemm(CUmultiGPUBLAShandle handle,
                         CBlasTranspose transA, CBlasTranspose transB,
                         size_t m, size_t n, size_t k,
                         float alpha, const float * restrict A, size_t lda,
                         const float * restrict B, size_t ldb,
                         float beta, float * restrict C, size_t ldc) {
  /* Row counts of A and B as stored (not of op(A)/op(B)). */
  const size_t nRowA = (transA == CBlasNoTrans) ? m : k;
  const size_t nRowB = (transB == CBlasNoTrans) ? k : n;
  int info = 0;
  if (lda < nRowA)
    info = 8;
  else if (ldb < nRowB)
    info = 10;
  else if (ldc < m)
    info = 13;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }

  /* Quick return when the result is unchanged. */
  if (m == 0 || n == 0 || ((alpha == zero || k == 0) && beta == one))
    return CUDA_SUCCESS;

  /* alpha == 0: C is only cleared or scaled; do it on the host. */
  if (alpha == zero) {
    if (beta == zero) {
#pragma omp parallel for
      for (size_t j = 0; j < n; j++) {
        for (size_t i = 0; i < m; i++)
          C[j * ldc + i] = zero;
      }
    }
    else {
      /* NOTE(review): here the pragma parallelizes the inner i-loop (one
       * parallel region per column), unlike the j-loop parallelism used
       * everywhere else in this file. */
      for (size_t j = 0; j < n; j++) {
#pragma omp parallel for
        for (size_t i = 0; i < m; i++)
          C[j * ldc + i] *= beta;
      }
    }
    return CUDA_SUCCESS;
  }

  /* Tile sizes per GPU task (keyed off transA only). */
  const size_t mb = (transA == CBlasNoTrans) ? SGEMM_N_MB : SGEMM_T_MB;
  const size_t nb = (transA == CBlasNoTrans) ? SGEMM_N_NB : SGEMM_T_NB;

  /* Smaller than one tile: not worth the transfer overhead, run on the CPU. */
  if (m < mb && n < nb) {
    sgemm(transA, transB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
    return CUDA_SUCCESS;
  }

  int task = 0, nTasks = (int)(((m + mb - 1) / mb) * ((n + nb - 1) / nb));
  CUtask tasks[nTasks];
  int ctx = 0;
  int nCtxs = cuMultiGPUGetContextCount(handle->mGPU);

  /* Fields that do not vary per tile are filled in once. */
  struct sgemm_args args = { .transA = transA, .transB = transB,
                             .k = k,
                             .alpha = alpha, .lda = lda, .ldb = ldb,
                             .beta = beta, .ldc = ldc };

  /* The four branches differ only in how the tile origins of A and B are
   * addressed for the transpose combinations. */
  if (transB == CBlasNoTrans) {
    if (transA == CBlasNoTrans) {
      for (size_t j = 0; j < n; j += nb) {
        args.n = min(n - j, nb);
        for (size_t i = 0; i < m; i += mb) {
          args.m = min(m - i, mb);
          args.A = &A[i];
          args.B = &B[j * ldb];
          args.C = &C[j * ldc + i];
          args.handle = &handle->handles[ctx];
          CU_ERROR_CHECK(cuTaskCreate(&tasks[task], background_sgemm, &args, sizeof(struct sgemm_args)));
          CU_ERROR_CHECK(cuMultiGPURunTask(handle->mGPU, ctx++, tasks[task++]));
          if (ctx == nCtxs)
            ctx = 0;
        }
      }
    }
    else {
      for (size_t j = 0; j < n; j += nb) {
        args.n = min(n - j, nb);
        for (size_t i = 0; i < m; i += mb) {
          args.m = min(m - i, mb);
          args.A = &A[i * lda];
          args.B = &B[j * ldb];
          args.C = &C[j * ldc + i];
          args.handle = &handle->handles[ctx];
          CU_ERROR_CHECK(cuTaskCreate(&tasks[task], background_sgemm, &args, sizeof(struct sgemm_args)));
          CU_ERROR_CHECK(cuMultiGPURunTask(handle->mGPU, ctx++, tasks[task++]));
          if (ctx == nCtxs)
            ctx = 0;
        }
      }
    }
  }
  else {
    if (transA == CBlasNoTrans) {
      for (size_t j = 0; j < n; j += nb) {
        args.n = min(n - j, nb);
        for (size_t i = 0; i < m; i += mb) {
          args.m = min(m - i, mb);
          args.A = &A[i];
          args.B = &B[j];
          args.C = &C[j * ldc + i];
          args.handle = &handle->handles[ctx];
          CU_ERROR_CHECK(cuTaskCreate(&tasks[task], background_sgemm, &args, sizeof(struct sgemm_args)));
          CU_ERROR_CHECK(cuMultiGPURunTask(handle->mGPU, ctx++, tasks[task++]));
          if (ctx == nCtxs)
            ctx = 0;
        }
      }
    }
    else {
      for (size_t j = 0; j < n; j += nb) {
        args.n = min(n - j, nb);
        for (size_t i = 0; i < m; i += mb) {
          args.m = min(m - i, mb);
          args.A = &A[i * lda];
          args.B = &B[j];
          args.C = &C[j * ldc + i];
          args.handle = &handle->handles[ctx];
          CU_ERROR_CHECK(cuTaskCreate(&tasks[task], background_sgemm, &args, sizeof(struct sgemm_args)));
          CU_ERROR_CHECK(cuMultiGPURunTask(handle->mGPU, ctx++, tasks[task++]));
          if (ctx == nCtxs)
            ctx = 0;
        }
      }
    }
  }

  /* Wait for (and release) every task; keeps the result of the last one. */
  CUresult result;
  for (task = 0; task < nTasks; task++)
    CU_ERROR_CHECK(cuTaskDestroy(tasks[task], &result));
  return result;
}
|
GB_unop__acosh_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__acosh_fp64_fp64
// op(A') function: GB_unop_tran__acosh_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = acosh (aij)
// A and C are both fp64, so the "cast" steps below are the identity.
#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

// Cx [p], the p-th entry of the output
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = acosh (x) ;

// casting (identity for fp64 -> fp64)
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij): combined get/cast/apply used by the transpose template
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;       \
    Cx [pC] = acosh (z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ACOSH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = acosh (Ax [p]) for all p in [0, anz), parallelized over nthreads.
// Cx and Ax may alias: each element is read once, then written once.
// Returns GrB_NO_VALUE when the operator is disabled at compile time.
GrB_Info GB_unop_apply__acosh_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        double z = aij ;            // cast step (identity: fp64 -> fp64)
        Cx [p] = acosh (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, cast, and apply acosh.
// The loop itself lives in the shared template GB_unop_transpose.c, which is
// specialized via the GB_* macros defined earlier in this file;
// GB_PHASE_2_OF_2 selects the phase that writes the output entries.
// Returns GrB_NO_VALUE when the operator is disabled at compile time.
GrB_Info GB_unop_tran__acosh_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp_for_firstprivate.c | <ompts:test>
<ompts:testdescription>Test which checks the omp for firstprivate clause by counting up a variable in a parallelized loop. Each thread has a firstprivate variable (1) and an variable (2) declared by for firstprivate. First it stores the result of its last iteration in variable (2). Then it stores the value of the variable (2) in its firstprivate variable (1). At the end all firstprivate variables (1) are added to a total sum in a critical section and compared with the correct result.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp for firstprivate</ompts:directive>
<ompts:dependences>omp critical,omp parallel firstprivate</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int sum1;
#pragma omp threadprivate(sum1)
/* Checks "omp for firstprivate": every thread's private sum0 must start at
 * the sentinel 12345; each thread accumulates its share of 1..LOOPCOUNT into
 * sum0 and records the final value in the threadprivate sum1, and the sum1
 * values are totalled under a critical section.  Returns 1 on success. */
int <ompts:testcode:functionname>omp_for_firstprivate</ompts:testcode:functionname> (FILE * logFile)
{
    int sum;            /* total accumulated across all threads */
    <ompts:orphan:vars>
    int sum0;
    </ompts:orphan:vars>
    int known_sum;      /* analytically expected result */
    int threadsnum;     /* number of threads in the parallel region */
    sum = 0;
    sum0 = 12345;       /* sentinel each thread must inherit via firstprivate */
    sum1 = 0;           /* threadprivate per-thread result (declared above) */
#pragma omp parallel
    {
#pragma omp single
        {
            /* single has an implicit barrier, so threadsnum is set before
             * any thread reads it after the region. */
            threadsnum=omp_get_num_threads();
        }
        /* sum0 = 0; */
        <ompts:orphan>
        int i;
#pragma omp for <ompts:check>firstprivate(sum0)</ompts:check>
        for (i = 1; i <= LOOPCOUNT; i++)
        {
            sum0 = sum0 + i;
            sum1 = sum0;
        } /* end of for */
        </ompts:orphan>
#pragma omp critical
        {
            sum = sum + sum1;
        } /* end of critical */
    } /* end of parallel */
    /* Each thread contributes the 12345 offset once; the loop iterations are
     * distributed exactly once over all threads. */
    known_sum = 12345* threadsnum+ (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    return (known_sum == sum);
}
</ompts:testcode>
</ompts:test>
|
stream_mmap.c | /*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <unistd.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
# include <stdlib.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/stat.h>
# include <time.h>
# include <fcntl.h>
#ifndef MAP_CXL_MEM
#define MAP_CXL_MEM 0x200000
#endif
/*-----------------------------------------------------------------------
* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
* This means the each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
#ifndef STREAM_ARRAY_SIZE
# define STREAM_ARRAY_SIZE 100000000 /* kyungsan : increased 10 times */
/*# define STREAM_ARRAY_SIZE 10000000 */
#endif
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
#ifdef NTIMES
#if NTIMES<=1
# define NTIMES 2500
#endif
#endif
#ifndef NTIMES
# define NTIMES 2500
#endif
/* Users are allowed to modify the "OFFSET" variable, which *may* change the
* relative alignment of the arrays (though compilers may change the
* effective offset by making the arrays non-contiguous on some systems).
* Use of non-zero values for OFFSET can be especially helpful if the
* STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
* OFFSET can also be set on the compile line without changing the source
* code using, for example, "-DOFFSET=56".
*/
#ifndef OFFSET
# define OFFSET 0
#endif
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to mccalpin@cs.virginia.edu
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif
/* Double-buffered benchmark arrays: [0] and [1] are two independent mmap'd
 * allocations so each kernel iteration can pick one at random (see main). */
STREAM_TYPE * a[2] = {NULL, NULL};
STREAM_TYPE * b[2] = {NULL, NULL};
STREAM_TYPE * c[2] = {NULL, NULL};
//STREAM_TYPE * d = NULL;
/*
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
b[STREAM_ARRAY_SIZE+OFFSET],
c[STREAM_ARRAY_SIZE+OFFSET];
*/
/*
FILE * fp;
void LogWrite(char * latency, unsigned int x, unsigned int y)
{
char x1[30];
char y1[30];
sprintf(x1, "%d", x);
sprintf(y1, "%d", y);
fputs(x1, fp);
fputs(" ", fp);
fputs(y1, fp);
fputs(" ", fp);
fputs(latency, fp);
fputs("\n", fp);
}
*/
/* Per-kernel timing statistics, indexed Copy/Scale/Add/Triad.  mintime is
 * seeded with FLT_MAX as a "very large" sentinel that any measured time
 * replaces.  (NOTE(review): the arrays are double, so DBL_MAX would be the
 * matching limit, though FLT_MAX is ample for wall-clock seconds.) */
static double avgtime[4] = {0}, maxtime[4] = {0},
              mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
                         "Add: ", "Triad: "};
/* Bytes moved per kernel iteration: Copy/Scale touch two arrays,
 * Add/Triad touch three. */
static double bytes[4] = {
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
};
extern double mysecond();           /* wall-clock timer, defined elsewhere */
extern void checkSTREAMresults();   /* result validation, defined elsewhere */
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
int
main()
{
int quantum, checktick();
int BytesPerWord;
int k;
ssize_t j;
STREAM_TYPE scalar;
double t, times[4][NTIMES];
unsigned int flagForThread = MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|MAP_POPULATE|MAP_CXL_MEM;
unsigned int flagForThread_CXL = MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|MAP_POPULATE|MAP_CXL_MEM;
/* --- SETUP --- determine precision and check timing --- */
int suspendValue; int rv;
int i;
srand(time(NULL));
printf(HLINE);
printf("STREAM version $Revision: 5.10 $\n");
printf(HLINE);
BytesPerWord = sizeof(STREAM_TYPE);
printf("This system uses %d bytes per array element.\n",
BytesPerWord);
printf("Alloc heap start \n");
double arr_size = sizeof(STREAM_TYPE) * (STREAM_ARRAY_SIZE+OFFSET);
//a = malloc(arr_size);
//b = malloc(arr_size);
//c = malloc(arr_size);
//a = mmap(NULL, arr_size, PROT_READ|PROT_WRITE, flagForThread, -1, 0);
a[0] = mmap(NULL, arr_size, PROT_READ|PROT_WRITE, flagForThread, -1, 0);
b[0] = mmap(NULL, arr_size, PROT_READ|PROT_WRITE, flagForThread, -1, 0);
c[0] = mmap(NULL, arr_size, PROT_READ|PROT_WRITE, flagForThread, -1, 0);
printf("key input start \n");
rv = scanf("%d",&suspendValue);
//d = mmap(NULL, DUMMYSize, PROT_READ|PROT_WRITE, flagForThread, -1, 0);
//b = mmap(NULL, arr_size, PROT_READ|PROT_WRITE, flagForThread, -1, 0);
//c = mmap(NULL, arr_size, PROT_READ|PROT_WRITE, flagForThread, -1, 0);
a[1] = mmap(NULL, arr_size, PROT_READ|PROT_WRITE, flagForThread, -1, 0);
b[1] = mmap(NULL, arr_size, PROT_READ|PROT_WRITE, flagForThread, -1, 0);
c[1] = mmap(NULL, arr_size, PROT_READ|PROT_WRITE, flagForThread, -1, 0);
/* if(!a || !b || !c){
printf("Alloc heap failure \n");
exit(1);
}*/
printf("Alloc heap done \n");
printf(HLINE);
#ifdef N
printf("***** WARNING: ******\n");
printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
printf("***** WARNING: ******\n");
#endif
printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
printf("Each kernel will be executed %d times.\n", NTIMES);
printf(" The *best* time for each kernel (excluding the first iteration)\n");
printf(" will be used to compute the reported bandwidth.\n");
#ifdef _OPENMP
printf(HLINE);
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
#endif
#ifdef _OPENMP
k = 0;
#pragma omp parallel
#pragma omp atomic
k++;
printf ("Number of Threads counted = %i\n",k);
#endif
/* Get initial value for system clock. */
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
a[0][j] = 1.0;
b[0][j] = 2.0;
c[0][j] = 0.0;
a[1][j] = 1.0;
b[1][j] = 2.0;
c[1][j] = 0.0;
}
printf(HLINE);
if ( (quantum = checktick()) >= 1)
printf("Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
else {
printf("Your clock granularity appears to be "
"less than one microsecond.\n");
quantum = 1;
}
t = mysecond();
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
{
a[0][j] = 2.0E0 * a[0][j];
}
t = 1.0E6 * (mysecond() - t);
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
{
a[1][j] = 2.0E0 * a[1][j];
}
printf("Each test below will take on the order"
" of %d microseconds.\n", (int) t );
printf(" (= %d clock ticks)\n", (int) (t/quantum) );
printf("Increase the size of the arrays if this shows that\n");
printf("you are not getting at least 20 clock ticks per test.\n");
printf(HLINE);
printf("WARNING -- The above is only a rough guideline.\n");
printf("For best results, please be sure you know the\n");
printf("precision of your system timer.\n");
printf(HLINE);
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
scalar = 3.0;
int randValuea = 0;
int randValueb = 0;
int randValuec = 0;
printf("MEASURE START \n");
for (k=0; k<NTIMES; k++)
{
randValuea = rand() % 2;
randValueb = rand() % 2;
randValuec = rand() % 2;
printf("RunTime %d\n", k);
times[0][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Copy();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[randValueb][j] = a[randValuea][j];
#endif
times[0][k] = mysecond() - times[0][k];
times[1][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[randValueb][j] = scalar*(c[randValuec][j]);
#endif
times[1][k] = mysecond() - times[1][k];
times[2][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Add();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[randValuec][j] = a[randValuea][j]+b[randValueb][j];
#endif
times[2][k] = mysecond() - times[2][k];
times[3][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[randValuea][j] = b[randValueb][j]+scalar*c[randValuec][j];
#endif
times[3][k] = mysecond() - times[3][k];
}
/* --- SUMMARY --- */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
printf("Function Best Rate MB/s Avg time Min time Max time\n");
for (j=0; j<4; j++) {
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
1.0E-06 * bytes[j]/mintime[j],
avgtime[j],
mintime[j],
maxtime[j]);
}
printf(HLINE);
/* --- Check Results --- */
//checkSTREAMresults();
printf(HLINE);
int zx = 0;
int xx = 0;
for(xx = 0; xx < 4; xx++)
{
for(zx = 0; zx < NTIMES; zx++)
{
printf("T :%d, %d %11.6f\n", xx, zx, times[xx][zx]);
//char latency[30];
//sprintf(latency,"%11.6f\n", times[xx][zx]);
}
}
int ret = 0;
if(munmap(a[0], arr_size) == -1)
{
printf("a munmap error\n");
return 0;
}
if(munmap(b[0], arr_size) == -1)
{
printf("b munmap error\n");
return 0;
}
if(munmap(c[0], arr_size) == -1)
{
printf("c munmap error\n");
return 0;
}
if(munmap(a[1], arr_size) == -1)
{
printf("a munmap error\n");
return 0;
}
if(munmap(b[1], arr_size) == -1)
{
printf("b munmap error\n");
return 0;
}
if(munmap(c[1], arr_size) == -1)
{
printf("c munmap error\n");
return 0;
}
return 0;
}
# define M 20

/*
 * Estimate the system timer granularity: collect M strictly increasing
 * readings from mysecond(), then return the smallest positive gap (in
 * microseconds) between consecutive readings.
 */
int
checktick()
    {
    int idx;
    double samples[M];
    double prev, cur;

    /* Gather M samples, each at least 1 usec apart (busy-wait between). */
    for (idx = 0; idx < M; idx++) {
        prev = mysecond();
        while (((cur = mysecond()) - prev) < 1.0E-6)
            ;
        samples[idx] = cur;
        }

    /*
     * The minimum difference between adjacent samples is our estimate
     * (in microseconds) of the clock granularity.
     */
    int best = 1000000;
    for (idx = 1; idx < M; idx++) {
        int gap = (int)(1.0E6 * (samples[idx] - samples[idx-1]));
        best = MIN(best, MAX(gap, 0));
        }
    return (best);
    }
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i;
i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
/* Fallback absolute-value macro used by the validation code below. */
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif
/*
 * Validate the STREAM kernels: replay the four kernels on scalars to derive
 * the expected final element values, then compare the averaged per-element
 * error against a type-dependent epsilon.
 *
 * NOTE(review): the per-element accumulation loops are commented out, so
 * aSumErr/bSumErr/cSumErr stay 0.0 and validation trivially passes; the
 * caller also has checkSTREAMresults() commented out.  Additionally, the
 * scalar replay below assumes the classic single-buffer kernels, while
 * main() now runs the kernels on randomly selected a[0..1]/b[0..1]/c[0..1]
 * buffers -- the expected values would need rederiving before re-enabling
 * this check.
 */
void checkSTREAMresults ()
{
STREAM_TYPE aj,bj,cj,scalar;
STREAM_TYPE aSumErr,bSumErr,cSumErr;
STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
double epsilon;
ssize_t j;
int k,ierr,err;
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
/* accumulate deltas between observed and expected results */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
// aSumErr += abs(a[j] - aj);
// bSumErr += abs(b[j] - bj);
// cSumErr += abs(c[j] - cj);
// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
}
aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
/* Epsilon scaled to the precision of STREAM_TYPE (float vs double). */
if (sizeof(STREAM_TYPE) == 4) {
epsilon = 1.e-6;
}
else if (sizeof(STREAM_TYPE) == 8) {
epsilon = 1.e-13;
}
else {
/* NOTE(review): %zu would be the portable conversion for sizeof. */
printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
epsilon = 1.e-6;
}
err = 0;
if (abs(aAvgErr/aj) > epsilon) {
err++;
printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf ("     Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
//if (abs(a[j]/aj-1.0) > epsilon) {
{
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
// printf("         array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
// j,aj,a[j],abs((aj-a[j])/aAvgErr));
}
#endif
}
}
printf("     For array a[], %d errors were found.\n",ierr);
}
if (abs(bAvgErr/bj) > epsilon) {
err++;
printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf ("     Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
printf ("     AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
//if (abs(b[j]/bj-1.0) > epsilon) {
{
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
// printf("         array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
// j,bj,b[j],abs((bj-b[j])/bAvgErr));
}
#endif
}
}
printf("     For array b[], %d errors were found.\n",ierr);
}
if (abs(cAvgErr/cj) > epsilon) {
err++;
printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf ("     Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
printf ("     AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
//if (abs(c[j]/cj-1.0) > epsilon) {
{
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
// printf("         array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
// j,cj,c[j],abs((cj-c[j])/cAvgErr));
}
#endif
}
}
printf("     For array c[], %d errors were found.\n",ierr);
}
if (err == 0) {
printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
}
#ifdef VERBOSE
printf ("Results Validation Verbose Results: \n");
printf ("    Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
printf ("    Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
printf ("    Rel Errors on a, b, c:     %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}
#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
void tuned_STREAM_Copy()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
}
void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
}
void tuned_STREAM_Add()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
}
void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
|
lock.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
/*
 * OMPT lit-test: drives a simple omp_lock_t through
 * init / set / unset / destroy and prints the current code address after
 * each call; the FileCheck directives below verify that each mutex
 * callback fired with the same wait_id and the matching return address.
 * Do not reorder the lock calls or the CHECK lines -- the test depends on
 * their exact sequence.
 */
int main()
{
//need to use an OpenMP construct so that OMPT will be initalized
#pragma omp parallel num_threads(1)
print_ids(0);
omp_lock_t lock;
/* The lock's address doubles as the expected wait_id in the callbacks. */
printf("%" PRIu64 ": &lock: %" PRIu64 "\n", ompt_get_thread_data()->value, (uint64_t) &lock);
omp_init_lock(&lock);
print_current_address(1);
omp_set_lock(&lock);
print_current_address(2);
omp_unset_lock(&lock);
print_current_address(3);
omp_destroy_lock(&lock);
print_current_address(4);
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_nest_lock'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: &lock: [[WAIT_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_init_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_lock: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_lock: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_destroy_lock: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
return 0;
}
|
remap.c | #define _GNU_SOURCE
#include <assert.h> /* assert */
#include <omp.h> /* openmp library */
#include <stdio.h> /* printf */
#include <stdlib.h> /* EXIT_SUCCESS */
#include <string.h> /* memset */
#include <sys/mman.h> /* mmap, mremap, munmap */
#include <unistd.h> /* sysconf */
/*
 * Demonstrates assembling a PROT_NONE "persistent" region page by page:
 * each of 4 OpenMP threads fills a private anonymous page, drops it to
 * read-only, and mremap()s it into its slot of the shared region; the
 * main thread then verifies every byte of the result.
 */
int main()
{
int ret;
size_t i, len, pagesize;
char * addr;
pagesize = sysconf(_SC_PAGESIZE);
/* NOTE(review): the 4 here must stay in sync with num_threads(4) below. */
len = 4*pagesize;
/* mmap a persistent memory region. */
addr = mmap(NULL, len, PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
-1, 0);
assert(MAP_FAILED != addr);
/* initialize memory region with invalid values. */
memset(addr, ~0, len);
/* remove all access privileges from persistent memory region. */
ret = mprotect(addr, len, PROT_NONE);
assert(-1 != ret);
#pragma omp parallel num_threads(4) default(none) shared(addr,pagesize) \
private(ret)
{
int tid;
char * taddr;
tid = omp_get_thread_num();
/* mmap a page into a temporary address with write privileges. */
taddr = mmap(NULL, pagesize, PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
assert(MAP_FAILED != taddr);
/* fill temporary page. */
memset(taddr, tid, pagesize);
/* remove write privileges from temporary page and grant read-only
* privileges. */
ret = mprotect(taddr, pagesize, PROT_READ);
assert(-1 != ret);
/* mremap temporary page to the correct location in persistent memory
* region.  MREMAP_FIXED atomically replaces the PROT_NONE mapping at
* that address; the page keeps its PROT_READ protection. */
taddr = mremap(taddr, pagesize, pagesize, MREMAP_MAYMOVE|MREMAP_FIXED,
addr+tid*pagesize);
assert(MAP_FAILED != taddr);
}
/* validate results. */
for (i=0; i<len; ++i)
assert((char)(i/pagesize) == addr[i]);
/* coalesce adjacent mappings (unnecessary). */
//addr = mremap(addr, len, len, 0);
//assert(MAP_FAILED != addr);
/* unmap persistent memory region. */
munmap(addr, len); /* return value intentionally unchecked at process exit */
return EXIT_SUCCESS;
}
|
copyprivate3.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define LOOPCOUNT 100
int main(void)
{
  /*
   * Verifies the OpenMP copyprivate clause: in every iteration exactly one
   * thread executes the single construct and writes `val`; copyprivate
   * broadcasts that value to the private `val` of every other thread.  If
   * the broadcast works, each thread sees val == iter after the single, so
   * `mismatch` stays 0, and the single body runs exactly LOOPCOUNT times.
   */
  int mismatch = 0;
  int single_count = 0;
  int iter;
  int val;
#pragma omp parallel private(iter,val)
  {
    for (iter = 0; iter < LOOPCOUNT; iter++)
    {
#pragma omp single copyprivate(val)
      {
        single_count++;
        val = iter;
      }
      /* #pragma omp barrier */
#pragma omp critical
      {
        /* Any thread whose broadcast value differs perturbs the sum. */
        mismatch += val - iter;
      }
#pragma omp barrier
    } /* end of for */
  } /* end of parallel */
  printf("result=%d nr_iterations=%d\n", mismatch, single_count);
  /* Harness convention: returns 1 (true) on success, 0 on failure. */
  return (mismatch == 0) && (single_count == LOOPCOUNT);
}
|
init_ops_random.c | #include <stdio.h>
#include <stdlib.h>
#include "init_ops.h"
#include "utility.h"
#include <time.h>
#include <limits.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// state randomization
unsigned long xor128(unsigned long* state);
double random_uniform(unsigned long* state);
double random_normal(unsigned long* state);
void initialize_Haar_random_state_with_seed_single(CTYPE *state, ITYPE dim, UINT seed);
void initialize_Haar_random_state_with_seed_parallel(CTYPE *state, ITYPE dim, UINT seed);
/* Convenience wrapper: seed the Haar-random initializer from the wall clock. */
void initialize_Haar_random_state(CTYPE *state, ITYPE dim) {
	const unsigned now = (unsigned)time(NULL);
	initialize_Haar_random_state_with_seed(state, dim, now);
}
/*
 * Dispatch to the single- or multi-threaded Haar-random initializer.
 * The parallel path is only taken when OpenMP is compiled in and the
 * state has at least 2^8 amplitudes.
 */
void initialize_Haar_random_state_with_seed(CTYPE *state, ITYPE dim, UINT seed) {
#ifdef _OPENMP
	const UINT threshold = 8;
	const ITYPE parallel_min = ((ITYPE)1) << threshold;
	if (dim >= parallel_min) {
		initialize_Haar_random_state_with_seed_parallel(state, dim, seed);
	}
	else {
		initialize_Haar_random_state_with_seed_single(state, dim, seed);
	}
#else
	initialize_Haar_random_state_with_seed_single(state, dim, seed);
#endif
}
// single thread
/*
 * Fill `state` with a Haar-random complex vector of length `dim`:
 * draw independent Gaussian real/imaginary parts from one xorshift128
 * stream (seeded via the C library PRNG), then rescale to unit norm.
 */
void initialize_Haar_random_state_with_seed_single(CTYPE *state, ITYPE dim, UINT seed) {
	const int ignore_first = 40;
	unsigned long rng[4];
	double norm_sq = 0.;
	int warmup;
	ITYPE idx;

	/* Seed the four xorshift words from rand(). */
	srand(seed);
	for (warmup = 0; warmup < 4; ++warmup) rng[warmup] = rand();
	/* Discard the first draws so a weak seed does not leak through. */
	for (warmup = 0; warmup < ignore_first; ++warmup) xor128(rng);

	for (idx = 0; idx < dim; ++idx) {
		const double re = random_normal(rng);
		const double im = random_normal(rng);
		state[idx] = re + 1.i * im;
		norm_sq += re * re + im * im;
	}
	/* Rescale to unit norm. */
	const double norm = sqrt(norm_sq);
	for (idx = 0; idx < dim; ++idx) {
		state[idx] /= norm;
	}
}
#ifdef _OPENMP
/*
 * Multi-threaded Haar-random state initializer: each OpenMP thread draws
 * its own contiguous slice of `state` from a private xorshift128 stream,
 * per-thread squared norms are reduced serially, and a final parallel
 * pass rescales the vector to unit norm.
 *
 * Fixes vs. the previous version:
 *  - malloc() results are now checked before use (they were dereferenced
 *    unconditionally);
 *  - the dead stores `r1 = r2 = 1;` in the fill loop were removed (both
 *    values were immediately overwritten).
 */
void initialize_Haar_random_state_with_seed_parallel(CTYPE *state, ITYPE dim, UINT seed) {
	// multi thread
	const int ignore_first = 40;
	const UINT thread_count = omp_get_max_threads();
	const ITYPE block_size = dim / thread_count;
	const ITYPE residual = dim % thread_count;
	/* Four xorshift words per thread, all seeded from the C library PRNG. */
	unsigned long* random_state_list = (unsigned long*)malloc(sizeof(unsigned long)*4*thread_count);
	double* norm_list = (double*)malloc(sizeof(double)*thread_count);
	if (random_state_list == NULL || norm_list == NULL) {
		fprintf(stderr, "initialize_Haar_random_state_with_seed_parallel: allocation failed\n");
		free(random_state_list);
		free(norm_list);
		return;
	}
	srand(seed);
	for (UINT i = 0; i < 4*thread_count; ++i) {
		random_state_list[i] = rand();
	}
	for (UINT i = 0; i < thread_count; ++i) {
		norm_list[i] = 0;
	}
#pragma omp parallel
	{
		UINT thread_id = omp_get_thread_num();
		unsigned long* my_random_state = random_state_list +4 * thread_id;
		/* The first `residual` threads take one extra element each. */
		ITYPE start_index = block_size * thread_id + (residual>thread_id?thread_id:residual);
		ITYPE end_index = block_size * (thread_id+1) + (residual > (thread_id+1) ? (thread_id+1) : residual);
		ITYPE index;
		// ignore first randoms
		for (int i = 0; i < ignore_first; ++i) xor128(my_random_state);
		for (index = start_index; index < end_index; ++index) {
			double r1, r2;
			r1 = random_normal(my_random_state);
			r2 = random_normal(my_random_state);
			state[index] = r1 + 1.i * r2;
			norm_list[thread_id] += r1 * r1 + r2 * r2;
		}
	}
	/* Serial reduction of the per-thread squared norms. */
	double normalizer = 0.;
	for (UINT i = 0; i < thread_count; ++i) {
		normalizer += norm_list[i];
	}
	normalizer = 1./sqrt(normalizer);
#pragma omp parallel for
	for (ITYPE index = 0; index < dim; ++index) {
		state[index] *= normalizer;
	}
	free(random_state_list);
	free(norm_list);
}
#endif
/*
 * One step of Marsaglia's xorshift128 generator: mix the oldest word with
 * the newest, rotate the 4-word window, and return the new word.
 */
unsigned long xor128(unsigned long* state) {
	const unsigned long mixed = state[0] ^ (state[0] << 11);
	state[0] = state[1];
	state[1] = state[2];
	state[2] = state[3];
	const unsigned long newest = state[3];
	state[3] = (newest ^ (newest >> 19)) ^ (mixed ^ (mixed >> 8));
	return state[3];
}
/*
 * Map one xorshift draw to a uniform double in [0, 1].
 *
 * Fix: the divisor was previously cast to float, so the whole division was
 * performed in single precision even though the function returns double,
 * discarding ~29 bits of the draw; divide by (double)ULONG_MAX instead.
 */
double random_uniform(unsigned long* state) {
	return xor128(state) / ((double)ULONG_MAX);
}
/*
 * One Gaussian variate via (half of) the Box-Muller transform.
 * NOTE(review): the textbook Box-Muller radius is sqrt(-2.0*log(u)); the
 * -1.0 here yields variance 1/2 rather than 1.  That is harmless for the
 * normalized Haar states built above, but confirm before reusing this as
 * a standard normal.  Also note log(0) = -inf is hit if random_uniform()
 * ever returns 0 (possible should the xorshift state collapse to zero).
 */
double random_normal(unsigned long* state) {
	return sqrt(-1.0*log(random_uniform(state))) * sin(2.0*M_PI*random_uniform(state));
}
|
fourier.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF OOO U U RRRR IIIII EEEEE RRRR %
% F O O U U R R I E R R %
% FFF O O U U RRRR I EEE RRRR %
% F O O U U R R I E R R %
% F OOO UUU R R IIIII EEEEE R R %
% %
% %
% MagickCore Discrete Fourier Transform Methods %
% %
% Software Design %
% Sean Burke %
% Fred Weinhaus %
% John Cristy %
% July 2009 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/fourier.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z) (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z) (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z) (z[0])
#endif
#endif
/*
Typedef declarations.
*/
/*
 * Per-channel parameters shared by the Fourier-transform helpers below.
 */
typedef struct _FourierInfo
{
/* Image channel this pass operates on. */
ChannelType
channel;
/* MagickTrue: magnitude/phase output; MagickFalse: real/imaginary. */
MagickBooleanType
modulus;
/* Transform dimensions in pixels. */
size_t
width,
height;
/* Presumably the index of the zero-frequency (DC) column/row after the
quadrant roll -- confirm against the FFT helpers that set it. */
ssize_t
center;
} FourierInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p l e x I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ComplexImages() performs complex mathematics on an image sequence.
%
% The format of the ComplexImages method is:
%
% MagickBooleanType ComplexImages(Image *images,
% const ComplexOperator operator,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o operator: A complex operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
 * Pairwise complex arithmetic on an image sequence.  Images 1 and 2 supply
 * the real and imaginary planes of operand A; B defaults to the same pair,
 * unless the sequence has at least four images, in which case images 3 and
 * 4 supply B.  A newly allocated two-image list holding C = A op B is
 * returned (NULL on failure).
 */
MagickExport Image *ComplexImages(const Image *images,
const ComplexOperator operator,ExceptionInfo *exception)
{
#define ComplexImageTag "Complex/Image"
CacheView
*Ai_view,
*Ar_view,
*Bi_view,
*Br_view,
*Ci_view,
*Cr_view;
const char
*artifact;
const Image
*Ai_image,
*Ar_image,
*Bi_image,
*Br_image;
double
snr;
Image
*Ci_image,
*complex_images,
*Cr_image,
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* The operation is pairwise, so at least two images are required. */
if (images->next == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",images->filename);
return((Image *) NULL);
}
/* Allocate the two output planes (real then imaginary) as a new list. */
image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
image->storage_class=DirectClass;
image->depth=32UL;
complex_images=NewImageList();
AppendImageToList(&complex_images,image);
image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
if (image == (Image *) NULL)
{
complex_images=DestroyImageList(complex_images);
return(complex_images);
}
image->storage_class=DirectClass;
image->depth=32UL;
AppendImageToList(&complex_images,image);
/*
Apply complex mathematics to image pixels.
*/
/* complex:snr adds a stabilizing term to the divisor of the divide op. */
artifact=GetImageArtifact(image,"complex:snr");
snr=0.0;
if (artifact != (const char *) NULL)
snr=StringToDouble(artifact,(char **) NULL);
/* Bind operand planes; B falls back to A for a two-image sequence. */
Ar_image=images;
Ai_image=images->next;
Br_image=images;
Bi_image=images->next;
if ((images->next->next != (Image *) NULL) &&
(images->next->next->next != (Image *) NULL))
{
Br_image=images->next->next;
Bi_image=images->next->next->next;
}
Cr_image=complex_images;
Ci_image=complex_images->next;
Ar_view=AcquireVirtualCacheView(Ar_image,exception);
Ai_view=AcquireVirtualCacheView(Ai_image,exception);
Br_view=AcquireVirtualCacheView(Br_image,exception);
Bi_view=AcquireVirtualCacheView(Bi_image,exception);
Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(images,complex_images,images->rows,1)
#endif
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*restrict Ai,
*restrict Ar,
*restrict Bi,
*restrict Br;
register PixelPacket
*restrict Ci,
*restrict Cr;
register ssize_t
x;
/* Another row already failed; skip (cannot break out of an omp for). */
if (status == MagickFalse)
continue;
Ar=GetCacheViewVirtualPixels(Ar_view,0,y,images->columns,1,exception);
Ai=GetCacheViewVirtualPixels(Ai_view,0,y,images->columns,1,exception);
Br=GetCacheViewVirtualPixels(Br_view,0,y,images->columns,1,exception);
Bi=GetCacheViewVirtualPixels(Bi_view,0,y,images->columns,1,exception);
Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,images->columns,1,exception);
Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,images->columns,1,exception);
if ((Ar == (const PixelPacket *) NULL) ||
(Ai == (const PixelPacket *) NULL) ||
(Br == (const PixelPacket *) NULL) ||
(Bi == (const PixelPacket *) NULL) ||
(Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) images->columns; x++)
{
switch (operator)
{
case AddComplexOperator:
{
Cr->red=Ar->red+Br->red;
Ci->red=Ai->red+Bi->red;
Cr->green=Ar->green+Br->green;
Ci->green=Ai->green+Bi->green;
Cr->blue=Ar->blue+Br->blue;
Ci->blue=Ai->blue+Bi->blue;
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity+Br->opacity;
Ci->opacity=Ai->opacity+Bi->opacity;
}
break;
}
case ConjugateComplexOperator:
default:
{
/* NOTE(review): the conjugate negates Bi here.  For a two-image
sequence Bi aliases Ai, so this equals conj(A); with four images
it negates B's imaginary plane instead -- confirm Ai was not
intended. */
Cr->red=Ar->red;
Ci->red=(-Bi->red);
Cr->green=Ar->green;
Ci->green=(-Bi->green);
Cr->blue=Ar->blue;
Ci->blue=(-Bi->blue);
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity;
Ci->opacity=(-Bi->opacity);
}
break;
}
case DivideComplexOperator:
{
double
gamma;
/* gamma = 1/(|B|^2 + snr), computed per channel; snr guards
against division by a near-zero magnitude. */
gamma=PerceptibleReciprocal(Br->red*Br->red+Bi->red*Bi->red+snr);
Cr->red=gamma*(Ar->red*Br->red+Ai->red*Bi->red);
Ci->red=gamma*(Ai->red*Br->red-Ar->red*Bi->red);
gamma=PerceptibleReciprocal(Br->green*Br->green+Bi->green*Bi->green+
snr);
Cr->green=gamma*(Ar->green*Br->green+Ai->green*Bi->green);
Ci->green=gamma*(Ai->green*Br->green-Ar->green*Bi->green);
gamma=PerceptibleReciprocal(Br->blue*Br->blue+Bi->blue*Bi->blue+snr);
Cr->blue=gamma*(Ar->blue*Br->blue+Ai->blue*Bi->blue);
Ci->blue=gamma*(Ai->blue*Br->blue-Ar->blue*Bi->blue);
if (images->matte != MagickFalse)
{
gamma=PerceptibleReciprocal(Br->opacity*Br->opacity+Bi->opacity*
Bi->opacity+snr);
Cr->opacity=gamma*(Ar->opacity*Br->opacity+Ai->opacity*
Bi->opacity);
Ci->opacity=gamma*(Ai->opacity*Br->opacity-Ar->opacity*
Bi->opacity);
}
break;
}
case MagnitudePhaseComplexOperator:
{
/* Phase is scaled from (-pi,pi] into [0,1) for storage. */
Cr->red=sqrt(Ar->red*Ar->red+Ai->red*Ai->red);
Ci->red=atan2(Ai->red,Ar->red)/(2.0*MagickPI)+0.5;
Cr->green=sqrt(Ar->green*Ar->green+Ai->green*Ai->green);
Ci->green=atan2(Ai->green,Ar->green)/(2.0*MagickPI)+0.5;
Cr->blue=sqrt(Ar->blue*Ar->blue+Ai->blue*Ai->blue);
Ci->blue=atan2(Ai->blue,Ar->blue)/(2.0*MagickPI)+0.5;
if (images->matte != MagickFalse)
{
Cr->opacity=sqrt(Ar->opacity*Ar->opacity+Ai->opacity*Ai->opacity);
Ci->opacity=atan2(Ai->opacity,Ar->opacity)/(2.0*MagickPI)+0.5;
}
break;
}
case MultiplyComplexOperator:
{
Cr->red=QuantumScale*(Ar->red*Br->red-Ai->red*Bi->red);
Ci->red=QuantumScale*(Ai->red*Br->red+Ar->red*Bi->red);
Cr->green=QuantumScale*(Ar->green*Br->green-Ai->green*Bi->green);
Ci->green=QuantumScale*(Ai->green*Br->green+Ar->green*Bi->green);
Cr->blue=QuantumScale*(Ar->blue*Br->blue-Ai->blue*Bi->blue);
Ci->blue=QuantumScale*(Ai->blue*Br->blue+Ar->blue*Bi->blue);
if (images->matte != MagickFalse)
{
Cr->opacity=QuantumScale*(Ar->opacity*Br->opacity-Ai->opacity*
Bi->opacity);
Ci->opacity=QuantumScale*(Ai->opacity*Br->opacity+Ar->opacity*
Bi->opacity);
}
break;
}
case RealImaginaryComplexOperator:
{
/* Inverse of MagnitudePhase: A's imaginary plane holds phase in
[0,1) and is mapped back to (-pi,pi]. */
Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5));
Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5));
Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5));
Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5));
Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5));
Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5));
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5));
Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5));
}
break;
}
case SubtractComplexOperator:
{
Cr->red=Ar->red-Br->red;
Ci->red=Ai->red-Bi->red;
Cr->green=Ar->green-Br->green;
Ci->green=Ai->green-Bi->green;
Cr->blue=Ar->blue-Br->blue;
Ci->blue=Ai->blue-Bi->blue;
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity-Br->opacity;
Ci->opacity=Ai->opacity-Bi->opacity;
}
break;
}
}
Ar++;
Ai++;
Br++;
Bi++;
Cr++;
Ci++;
}
if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
status=MagickFalse;
if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ComplexImages)
#endif
proceed=SetImageProgress(images,ComplexImageTag,progress++,
images->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
Cr_view=DestroyCacheView(Cr_view);
Ci_view=DestroyCacheView(Ci_view);
Br_view=DestroyCacheView(Br_view);
Bi_view=DestroyCacheView(Bi_view);
Ar_view=DestroyCacheView(Ar_view);
Ai_view=DestroyCacheView(Ai_view);
if (status == MagickFalse)
complex_images=DestroyImageList(complex_images);
return(complex_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r w a r d F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ForwardFourierTransformImage() implements the discrete Fourier transform
% (DFT) of the image either as a magnitude / phase or real / imaginary image
% pair.
%
% The format of the ForwadFourierTransformImage method is:
%
% Image *ForwardFourierTransformImage(const Image *image,
% const MagickBooleanType modulus,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulus: if true, return as transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
 * Circularly shift a width x height buffer of doubles by (x_offset,
 * y_offset), wrapping indices at the edges.  Used to move the zero
 * frequency term to the buffer center.  Returns MagickFalse only if the
 * scratch buffer cannot be allocated.
 */
static MagickBooleanType RollFourier(const size_t width,const size_t height,
const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
double
*source_pixels;
MemoryInfo
*source_info;
register ssize_t
i,
x;
ssize_t
u,
v,
y;
/*
Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
*/
source_info=AcquireVirtualMemory(height,width*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
return(MagickFalse);
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
i=0L;
for (y=0L; y < (ssize_t) height; y++)
{
/* v = (y + y_offset) mod height, computed branch-wise so the result
stays in [0, height) for either sign of the offset. */
if (y_offset < 0L)
v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
else
v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
y+y_offset;
for (x=0L; x < (ssize_t) width; x++)
{
/* u = (x + x_offset) mod width, same wrap-around logic. */
if (x_offset < 0L)
u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
else
u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
x+x_offset;
source_pixels[v*width+u]=roll_pixels[i++];
}
}
/* Copy the rolled scratch buffer back over the caller's buffer. */
(void) CopyMagickMemory(roll_pixels,source_pixels,height*width*
sizeof(*source_pixels));
source_info=RelinquishVirtualMemory(source_info);
return(MagickTrue);
}
/*
 * Expand the half-width (center x height) FFT output in source_pixels into
 * a full width x height, center-origin layout in forward_pixels: the
 * stored half fills the right side, and the left side is reconstructed
 * from it using the conjugate symmetry of a real-input transform.
 * Returns MagickFalse if the intermediate roll fails to allocate.
 */
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
const size_t height,double *source_pixels,double *forward_pixels)
{
MagickBooleanType
status;
register ssize_t
x;
ssize_t
center,
y;
/*
Swap quadrants.
*/
/* center = width/2 + 1 is the row stride of the half-width FFT data. */
center=(ssize_t) floor((double) width/2L)+1L;
status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
source_pixels);
if (status == MagickFalse)
return(MagickFalse);
/* Right half: copy the stored half-spectrum directly. */
for (y=0L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L-1L); x++)
forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
/* Left half: mirror with row index reflected (conjugate symmetry). */
for (y=1; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L-1L); x++)
forward_pixels[(height-y)*width+width/2L-x-1L]=
source_pixels[y*center+x+1L];
/* First row's left half mirrors its own right half. */
for (x=0L; x < (ssize_t) (width/2L); x++)
forward_pixels[-x+width/2L-1L]=forward_pixels[x+width/2L+1L];
return(MagickTrue);
}
/*
  Flip the sign of the left half (x < width/2) of every row of the
  width x height buffer, in place.
*/
static void CorrectPhaseLHS(const size_t width,const size_t height,
  double *fourier_pixels)
{
  ssize_t
    row,
    col;

  for (row=0; row < (ssize_t) height; row++)
    for (col=0; col < (ssize_t) (width/2L); col++)
      fourier_pixels[row*width+col]=(-fourier_pixels[row*width+col]);
}
/*
  ForwardFourier() writes the magnitude and phase arrays produced by
  ForwardFourierTransform() into the two-image list rooted at `image'
  (first image: magnitude, second: phase).  Both arrays are quadrant-swapped
  for display, the phase left half is sign-corrected, and when modulus is
  requested the phase is rescaled from [-pi,pi] to [0,1].  Returns the
  status of the last cache-view sync (MagickFalse on failure).
*/
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
CacheView
*magnitude_view,
*phase_view;
double
*magnitude_pixels,
*phase_pixels;
Image
*magnitude_image,
*phase_image;
MagickBooleanType
status;
MemoryInfo
*magnitude_info,
*phase_info;
register IndexPacket
*indexes;
register PixelPacket
*q;
register ssize_t
x;
ssize_t
i,
y;
magnitude_image=GetFirstImageInList(image);
phase_image=GetNextImageInList(image);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",image->filename);
return(MagickFalse);
}
/*
Create "Fourier Transform" image from constituent arrays.
*/
magnitude_info=AcquireVirtualMemory((size_t) fourier_info->height,
fourier_info->width*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info->height,
fourier_info->width*sizeof(*phase_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL))
{
/* Partial-failure cleanup: release whichever allocation succeeded. */
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
/* Zero both buffers: the quadrant swap does not write every element. */
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
(void) ResetMagickMemory(magnitude_pixels,0,fourier_info->height*
fourier_info->width*sizeof(*magnitude_pixels));
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
(void) ResetMagickMemory(phase_pixels,0,fourier_info->height*
fourier_info->width*sizeof(*phase_pixels));
status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
magnitude,magnitude_pixels);
if (status != MagickFalse)
status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
phase_pixels);
CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
if (fourier_info->modulus != MagickFalse)
{
/* Map phase from [-pi,pi] into [0,1] for storage as quantum values. */
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
phase_pixels[i]/=(2.0*MagickPI);
phase_pixels[i]+=0.5;
i++;
}
}
magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
/* NOTE(review): fourier_info->height is passed as the column count here;
   this only works because the transform is forced square (width==height)
   upstream -- confirm and consider normalizing to ->width. */
q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->height,1UL,
exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
/* Route the sample to the one channel this pass is responsible for. */
switch (fourier_info->channel)
{
case RedChannel:
default:
{
SetPixelRed(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
case GreenChannel:
{
SetPixelGreen(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
case BlueChannel:
{
SetPixelBlue(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
case OpacityChannel:
{
SetPixelOpacity(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
case IndexChannel:
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
magnitude_pixels[i]));
break;
}
case GrayChannels:
{
SetPixelGray(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
}
i++;
q++;
}
status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
if (status == MagickFalse)
break;
}
magnitude_view=DestroyCacheView(magnitude_view);
/* Second pass: same channel routing, writing the phase image. */
i=0L;
phase_view=AcquireAuthenticCacheView(phase_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
/* NOTE(review): same height-as-columns argument as above -- confirm. */
q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->height,1UL,
exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(phase_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
SetPixelRed(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case GreenChannel:
{
SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case BlueChannel:
{
SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case OpacityChannel:
{
SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case IndexChannel:
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case GrayChannels:
{
SetPixelGray(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
}
i++;
q++;
}
status=SyncCacheViewAuthenticPixels(phase_view,exception);
if (status == MagickFalse)
break;
}
phase_view=DestroyCacheView(phase_view);
phase_info=RelinquishVirtualMemory(phase_info);
magnitude_info=RelinquishVirtualMemory(magnitude_info);
return(status);
}
/*
  ForwardFourierTransform() reads one channel of `image' into a real array,
  runs an FFTW real-to-complex 2-D transform, optionally normalizes by
  1/(width*height), and decomposes the half-plane result into
  magnitude/phase (when fourier_info->modulus) or real/imaginary arrays of
  height x center elements each.  Returns MagickFalse on allocation failure.
*/
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
const Image *image,double *magnitude_pixels,double *phase_pixels,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*value;
double
*source_pixels;
fftw_complex
*forward_pixels;
fftw_plan
fftw_r2c_plan;
MemoryInfo
*forward_info,
*source_info;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
i,
x;
ssize_t
y;
/*
Generate the forward Fourier transform.
*/
source_info=AcquireVirtualMemory((size_t) fourier_info->height,
fourier_info->width*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
/* Zero-fill: the image may be smaller than the padded square extent. */
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
ResetMagickMemory(source_pixels,0,fourier_info->height*fourier_info->width*
sizeof(*source_pixels));
i=0L;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
/* Normalize the selected channel to [0,1] doubles. */
switch (fourier_info->channel)
{
case RedChannel:
default:
{
source_pixels[i]=QuantumScale*GetPixelRed(p);
break;
}
case GreenChannel:
{
source_pixels[i]=QuantumScale*GetPixelGreen(p);
break;
}
case BlueChannel:
{
source_pixels[i]=QuantumScale*GetPixelBlue(p);
break;
}
case OpacityChannel:
{
source_pixels[i]=QuantumScale*GetPixelOpacity(p);
break;
}
case IndexChannel:
{
source_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
break;
}
case GrayChannels:
{
source_pixels[i]=QuantumScale*GetPixelGray(p);
break;
}
}
i++;
p++;
}
}
image_view=DestroyCacheView(image_view);
/* r2c output is height rows of center (= width/2+1) complex values. */
forward_info=AcquireVirtualMemory((size_t) fourier_info->height,
fourier_info->center*sizeof(*forward_pixels));
if (forward_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
return(MagickFalse);
}
forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
/* The FFTW planner is not thread-safe; only plan creation is serialized,
   fftw_execute itself is safe to run concurrently. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
/* NOTE(review): fftw_plan_dft_r2c_2d takes (n0=rows,n1=cols); width is
   passed first.  Harmless only because width==height here -- confirm. */
fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
source_pixels,forward_pixels,FFTW_ESTIMATE);
fftw_execute(fftw_r2c_plan);
fftw_destroy_plan(fftw_r2c_plan);
source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
/* Default (artifact unset) is to normalize on the forward transform. */
value=GetImageArtifact(image,"fourier:normalize");
if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
{
double
gamma;
/*
Normalize inverse transform.
*/
i=0L;
gamma=PerceptibleReciprocal((double) fourier_info->width*
fourier_info->height);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
forward_pixels[i]*=gamma;
#else
forward_pixels[i][0]*=gamma;
forward_pixels[i][1]*=gamma;
#endif
i++;
}
}
/*
Generate magnitude and phase (or real and imaginary).
*/
i=0L;
if (fourier_info->modulus != MagickFalse)
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
magnitude_pixels[i]=cabs(forward_pixels[i]);
phase_pixels[i]=carg(forward_pixels[i]);
i++;
}
else
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
magnitude_pixels[i]=creal(forward_pixels[i]);
phase_pixels[i]=cimag(forward_pixels[i]);
i++;
}
forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
return(MagickTrue);
}
/*
  ForwardFourierTransformChannel() computes the forward DFT of a single
  channel: the extent is padded to an even square, the half-plane
  magnitude/phase (or real/imaginary) arrays are computed by
  ForwardFourierTransform(), then written into the magnitude/phase image
  pair by ForwardFourier().  Returns MagickFalse on failure.

  Fix: the error path previously tested `magnitude_info == NULL' before
  relinquishing it, which both called RelinquishVirtualMemory() on a NULL
  pointer and leaked the live magnitude buffer whenever only the phase
  allocation failed.  The test is now `!=', matching the cleanup pattern
  used elsewhere in this file (e.g. ForwardFourier()).
*/
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude_pixels,
    *phase_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  size_t
    extent;

  /*
    Pad non-square or odd extents up to an even square so the transform
    dimensions match between forward and inverse passes.
  */
  fourier_info.width=image->columns;
  fourier_info.height=image->rows;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0))
    {
      extent=image->columns < image->rows ? image->rows : image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  /* center = width/2+1: column count of the r2c half-plane output. */
  fourier_info.center=(ssize_t) floor((double) fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      /* Release whichever allocation succeeded before reporting failure. */
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
    phase_pixels,exception);
  if (status != MagickFalse)
    status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
      phase_pixels,exception);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
#endif
/*
  ForwardFourierTransformImage() returns a two-image list (magnitude, phase)
  holding the forward DFT of `image', or an empty list on failure.  Without
  FFTW support it only raises a MissingDelegate warning.  Each channel is
  transformed in its own OpenMP section; gray images get a single combined
  pass, otherwise R/G/B (plus opacity and CMYK index when present) run
  independently.
*/
MagickExport Image *ForwardFourierTransformImage(const Image *image,
const MagickBooleanType modulus,ExceptionInfo *exception)
{
Image
*fourier_image;
fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
image->filename);
#else
{
Image
*magnitude_image;
size_t
extent,
height,
width;
/* Pad to an even square, mirroring ForwardFourierTransformChannel. */
width=image->columns;
height=image->rows;
if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
((image->rows % 2) != 0))
{
extent=image->columns < image->rows ? image->rows : image->columns;
width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
height=width;
magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
if (magnitude_image != (Image *) NULL)
{
Image
*phase_image;
magnitude_image->storage_class=DirectClass;
magnitude_image->depth=32UL;
phase_image=CloneImage(image,width,height,MagickTrue,exception);
if (phase_image == (Image *) NULL)
magnitude_image=DestroyImage(magnitude_image);
else
{
MagickBooleanType
is_gray,
status;
phase_image->storage_class=DirectClass;
phase_image->depth=32UL;
/* The list now owns both images; it is destroyed whole on failure. */
AppendImageToList(&fourier_image,magnitude_image);
AppendImageToList(&fourier_image,phase_image);
status=MagickTrue;
is_gray=IsGrayImage(image,exception);
/* One section per channel; each failure downgrades shared `status'. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GrayChannels,modulus,fourier_image,exception);
else
thread_status=ForwardFourierTransformChannel(image,RedChannel,
modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GreenChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
BlueChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (image->matte != MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
OpacityChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (image->colorspace == CMYKColorspace)
thread_status=ForwardFourierTransformChannel(image,
IndexChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImageList(fourier_image);
fftw_cleanup();
}
}
}
#endif
return(fourier_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InverseFourierTransformImage() implements the inverse discrete Fourier
% transform (DFT) of the image either as a magnitude / phase or real /
% imaginary image pair.
%
% The format of the InverseFourierTransformImage method is:
%
% Image *InverseFourierTransformImage(const Image *magnitude_image,
% const Image *phase_image,const MagickBooleanType modulus,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o magnitude_image: the magnitude or real image.
%
% o phase_image: the phase or imaginary image.
%
% o modulus: if true, return transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
  InverseQuadrantSwap() is the inverse of ForwardQuadrantSwap(): it folds a
  display-centered full-plane array (width x height) back into the
  width/2+1-column half-plane layout FFTW's c2r transform expects, undoing
  the forward pass's Hermitian mirroring and row roll.  Returns the status
  of the final RollFourier() call.
*/
static MagickBooleanType InverseQuadrantSwap(const size_t width,
const size_t height,const double *source,double *destination)
{
register ssize_t
x;
ssize_t
center,
y;
/*
Swap quadrants.
*/
/* center = width/2+1: column count of the half-plane destination. */
center=(ssize_t) floor((double) width/2L)+1L;
/* Un-mirror the body: row y, reversed columns, maps to row height-y. */
for (y=1L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L+1L); x++)
destination[(height-y)*center-x+width/2L]=source[y*width+x];
/* First destination column comes from the source's center column. */
for (y=0L; y < (ssize_t) height; y++)
destination[y*center]=source[y*width+width/2L];
/* Rebuild the top row, reversed, from the source's leading columns. */
for (x=0L; x < center; x++)
destination[x]=source[center-x-1L];
/* Roll rows up by height/2 to undo the forward pass's DC centering. */
return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}
/*
  InverseFourier() reads the magnitude and phase (or real and imaginary)
  images back into double arrays, undoes the forward pass's quadrant swap,
  phase scaling and LHS sign correction, and merges the two arrays into the
  complex half-plane buffer `fourier_pixels' ready for the c2r transform.
  Returns MagickFalse on allocation failure or if a quadrant swap fails.
*/
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
const Image *magnitude_image,const Image *phase_image,
fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
CacheView
*magnitude_view,
*phase_view;
double
*inverse_pixels,
*magnitude_pixels,
*phase_pixels;
MagickBooleanType
status;
MemoryInfo
*inverse_info,
*magnitude_info,
*phase_info;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
i,
x;
ssize_t
y;
/*
Inverse fourier - read image and break down into a double array.
*/
magnitude_info=AcquireVirtualMemory((size_t)fourier_info->height,
fourier_info->width*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info->height,
fourier_info->width*sizeof(*phase_pixels));
inverse_info=AcquireVirtualMemory((size_t) fourier_info->height,
fourier_info->center*sizeof(*inverse_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL) ||
(inverse_info == (MemoryInfo *) NULL))
{
/* Partial-failure cleanup: release whichever allocations succeeded. */
if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
if (inverse_info != (MemoryInfo *) NULL)
inverse_info=RelinquishVirtualMemory(inverse_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
i=0L;
magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
exception);
if (p == (const PixelPacket *) NULL)
break;
/* NOTE(review): Authentic index queue fetched from a *virtual* cache
   view; GetCacheViewVirtualIndexQueue() looks intended -- confirm. */
indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
/* Read back the one channel this pass handles, scaled to [0,1]. */
switch (fourier_info->channel)
{
case RedChannel:
default:
{
magnitude_pixels[i]=QuantumScale*GetPixelRed(p);
break;
}
case GreenChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelGreen(p);
break;
}
case BlueChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelBlue(p);
break;
}
case OpacityChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelOpacity(p);
break;
}
case IndexChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
break;
}
case GrayChannels:
{
magnitude_pixels[i]=QuantumScale*GetPixelGray(p);
break;
}
}
i++;
p++;
}
}
magnitude_view=DestroyCacheView(magnitude_view);
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
magnitude_pixels,inverse_pixels);
/* Compact the swapped half-plane back into magnitude_pixels. */
(void) CopyMagickMemory(magnitude_pixels,inverse_pixels,fourier_info->height*
fourier_info->center*sizeof(*magnitude_pixels));
i=0L;
phase_view=AcquireVirtualCacheView(phase_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
exception);
if (p == (const PixelPacket *) NULL)
break;
/* NOTE(review): same virtual-view/Authentic-queue mix as above. */
indexes=GetCacheViewAuthenticIndexQueue(phase_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
phase_pixels[i]=QuantumScale*GetPixelRed(p);
break;
}
case GreenChannel:
{
phase_pixels[i]=QuantumScale*GetPixelGreen(p);
break;
}
case BlueChannel:
{
phase_pixels[i]=QuantumScale*GetPixelBlue(p);
break;
}
case OpacityChannel:
{
phase_pixels[i]=QuantumScale*GetPixelOpacity(p);
break;
}
case IndexChannel:
{
phase_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
break;
}
case GrayChannels:
{
phase_pixels[i]=QuantumScale*GetPixelGray(p);
break;
}
}
i++;
p++;
}
}
if (fourier_info->modulus != MagickFalse)
{
/* Undo the forward pass's [0,1] phase scaling: back to [-pi,pi]. */
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
phase_pixels[i]-=0.5;
phase_pixels[i]*=(2.0*MagickPI);
i++;
}
}
phase_view=DestroyCacheView(phase_view);
CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
if (status != MagickFalse)
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
phase_pixels,inverse_pixels);
(void) CopyMagickMemory(phase_pixels,inverse_pixels,fourier_info->height*
fourier_info->center*sizeof(*phase_pixels));
inverse_info=RelinquishVirtualMemory(inverse_info);
/*
Merge two sets.
*/
/* Recombine into complex samples: polar form when modulus was used,
   otherwise the arrays already are the real and imaginary parts. */
i=0L;
if (fourier_info->modulus != MagickFalse)
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
magnitude_pixels[i]*sin(phase_pixels[i]);
#else
fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
i++;
}
else
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
fourier_pixels[i][0]=magnitude_pixels[i];
fourier_pixels[i][1]=phase_pixels[i];
#endif
i++;
}
magnitude_info=RelinquishVirtualMemory(magnitude_info);
phase_info=RelinquishVirtualMemory(phase_info);
return(status);
}
/*
  InverseFourierTransform() runs FFTW's complex-to-real 2-D transform on the
  half-plane buffer `fourier_pixels' (optionally normalizing first, when the
  "fourier:normalize" artifact is "inverse") and writes the resulting
  spatial-domain samples into one channel of `image', cropping the padded
  square extent back to the image's actual columns/rows.
*/
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
double
*source_pixels;
const char
*value;
fftw_plan
fftw_c2r_plan;
MemoryInfo
*source_info;
register IndexPacket
*indexes;
register PixelPacket
*q;
register ssize_t
i,
x;
ssize_t
y;
source_info=AcquireVirtualMemory((size_t) fourier_info->height,
fourier_info->width*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
/* NOTE(review): value may be NULL when the artifact is unset; this relies
   on LocaleCompare() tolerating NULL arguments -- confirm. */
value=GetImageArtifact(image,"fourier:normalize");
if (LocaleCompare(value,"inverse") == 0)
{
double
gamma;
/*
Normalize Fourier transform.
*/
i=0L;
gamma=PerceptibleReciprocal((double) fourier_info->width*
fourier_info->height);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]*=gamma;
#else
fourier_pixels[i][0]*=gamma;
fourier_pixels[i][1]*=gamma;
#endif
i++;
}
}
/* Serialize planning only; the FFTW planner is not thread-safe. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_InverseFourierTransform)
#endif
{
fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
fourier_pixels,source_pixels,FFTW_ESTIMATE);
fftw_execute(fftw_c2r_plan);
fftw_destroy_plan(fftw_c2r_plan);
}
i=0L;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
/* The transform extent may exceed the image: skip padded rows and
   clamp the requested row width to the image's columns. */
if (y >= (ssize_t) image->rows)
break;
q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
image->columns ? image->columns : fourier_info->width,1UL,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
/* Padded columns advance i but are not written back. */
if (x < (ssize_t) image->columns)
switch (fourier_info->channel)
{
case RedChannel:
default:
{
SetPixelRed(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case GreenChannel:
{
SetPixelGreen(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case BlueChannel:
{
SetPixelBlue(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case OpacityChannel:
{
SetPixelOpacity(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case IndexChannel:
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
source_pixels[i]));
break;
}
case GrayChannels:
{
SetPixelGray(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
}
i++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
source_info=RelinquishVirtualMemory(source_info);
return(MagickTrue);
}
/*
  InverseFourierTransformChannel() reconstructs one channel of the output
  image from the magnitude/phase image pair: it derives the padded even
  square transform extent from the magnitude image, allocates the complex
  half-plane buffer, merges the pair via InverseFourier(), then runs the
  inverse transform into fourier_image.  Returns MagickFalse on failure.
*/
static MagickBooleanType InverseFourierTransformChannel(
  const Image *magnitude_image,const Image *phase_image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  fftw_complex
    *inverse_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info;

  size_t
    columns,
    extent,
    rows;

  /*
    Non-square or odd extents are padded up to an even square, matching the
    forward transform's geometry.
  */
  columns=magnitude_image->columns;
  rows=magnitude_image->rows;
  fourier_info.width=columns;
  if ((columns != rows) || ((columns % 2) != 0) || ((rows % 2) != 0))
    {
      if (columns < rows)
        extent=rows;
      else
        extent=columns;
      if ((extent & 0x01) == 1)
        extent++;
      fourier_info.width=extent;
    }
  fourier_info.height=fourier_info.width;
  /* center = width/2+1: column count of the complex half-plane buffer. */
  fourier_info.center=(ssize_t) floor((double) fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  inverse_info=AcquireVirtualMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*inverse_pixels));
  if (inverse_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info);
  status=InverseFourier(&fourier_info,magnitude_image,phase_image,
    inverse_pixels,exception);
  if (status != MagickFalse)
    status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image,
      exception);
  inverse_info=RelinquishVirtualMemory(inverse_info);
  return(status);
}
#endif
/*
  InverseFourierTransformImage() reconstructs an image from its
  magnitude/phase (or real/imaginary) pair, returning NULL on failure.
  Without FFTW support it only raises a MissingDelegate warning.  Each
  channel is inverted in its own OpenMP section; a gray pair gets a single
  combined pass, otherwise R/G/B (plus opacity and CMYK index when present)
  run independently.
*/
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
const Image *phase_image,const MagickBooleanType modulus,
ExceptionInfo *exception)
{
Image
*fourier_image;
assert(magnitude_image != (Image *) NULL);
assert(magnitude_image->signature == MagickSignature);
if (magnitude_image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
magnitude_image->filename);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",magnitude_image->filename);
return((Image *) NULL);
}
#if !defined(MAGICKCORE_FFTW_DELEGATE)
fourier_image=(Image *) NULL;
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
magnitude_image->filename);
#else
{
fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
magnitude_image->rows,MagickTrue,exception);
if (fourier_image != (Image *) NULL)
{
MagickBooleanType
is_gray,
status;
status=MagickTrue;
/* Combined gray pass only when BOTH inputs are gray. */
is_gray=IsGrayImage(magnitude_image,exception);
if (is_gray != MagickFalse)
is_gray=IsGrayImage(phase_image,exception);
/* One section per channel; each failure downgrades shared `status'. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GrayChannels,modulus,fourier_image,exception);
else
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,RedChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GreenChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,BlueChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (magnitude_image->matte != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,OpacityChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (magnitude_image->colorspace == CMYKColorspace)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,IndexChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImage(fourier_image);
}
fftw_cleanup();
}
#endif
return(fourier_image);
}
|
ft.c |
/*[]*/
struct __sFILEX ;
/*[]*/
int printf(const char *restrict , ...);
/*[]*/
void exit(int );
/*[]*/
extern double cos(double );
/*[]*/
extern double sin(double );
/*[]*/
extern double exp(double );
/*[]*/
extern double log(double );
/*[]*/
extern double fabs(double );
/*[]*/
typedef int boolean;
/*[]*/
struct stUn_imopVarPre11 {
double real;
double imag;
} ;
/*[]*/
typedef struct stUn_imopVarPre11 dcomplex;
/*[]*/
extern double randlc(double *, double );
/*[]*/
extern void vranlc(int , double * , double , double *);
/*[]*/
extern void timer_clear(int );
/*[]*/
extern void timer_start(int );
/*[]*/
extern void timer_stop(int );
/*[]*/
extern double timer_read(int );
/*[]*/
extern void c_print_results(char *name, char class , int n1 , int n2 , int n3 , int niter , int nthreads , double t , double mops , char *optype , int passed_verification , char *npbversion , char *compiletime , char *cc , char *clink , char *c_lib , char *c_inc , char *cflags , char *clinkflags , char *rand);
/*[]*/
int fftblock;
/*[]*/
int fftblockpad;
/*[]*/
static int dims[3][3];
/*[]*/
static int xstart[3];
/*[]*/
static int ystart[3];
/*[]*/
static int zstart[3];
/*[]*/
static int xend[3];
/*[]*/
static int yend[3];
/*[]*/
static int zend[3];
/*[]*/
static double ex[(6 * (64 * 64 / 4 + 64 * 64 / 4 + 64 * 64 / 4)) + 1];
/*[]*/
static dcomplex u[64];
/*[]*/
static dcomplex sums[6 + 1];
/*[]*/
static int niter;
/*[]*/
static void evolve(dcomplex u0[64][64][64], dcomplex u1[64][64][64] , int t , int indexmap[64][64][64] , int d[3]);
/*[]*/
static void compute_initial_conditions(dcomplex u0[64][64][64], int d[3]);
/*[]*/
static void ipow46(double a, int exponent , double *result);
/*[]*/
static void setup(void );
/*[]*/
static void compute_indexmap(int indexmap[64][64][64], int d[3]);
/*[]*/
static void print_timers(void );
/*[]*/
static void fft(int dir, dcomplex x1[64][64][64] , dcomplex x2[64][64][64]);
/*[]*/
static void cffts1(int is, int d[3] , dcomplex x[64][64][64] , dcomplex xout[64][64][64] , dcomplex y0[64][18] , dcomplex y1[64][18]);
/*[]*/
static void cffts2(int is, int d[3] , dcomplex x[64][64][64] , dcomplex xout[64][64][64] , dcomplex y0[64][18] , dcomplex y1[64][18]);
/*[]*/
static void cffts3(int is, int d[3] , dcomplex x[64][64][64] , dcomplex xout[64][64][64] , dcomplex y0[64][18] , dcomplex y1[64][18]);
/*[]*/
static void fft_init(int n);
/*[]*/
static void cfftz(int is, int m , int n , dcomplex x[64][18] , dcomplex y[64][18]);
/*[]*/
static void fftz2(int is, int l , int m , int n , int ny , int ny1 , dcomplex u[64] , dcomplex x[64][18] , dcomplex y[64][18]);
/*[]*/
static int ilog2(int n);
/*[]*/
static void checksum(int i, dcomplex u1[64][64][64] , int d[3]);
/*[]*/
static void verify(int d1, int d2 , int d3 , int nt , boolean *verified , char *class);
/*[]*/
/*[]*/
/*[]*/
int main(int argc, char **argv) {
/*[]*/
/*[]*/
int i;
/*[]*/
static dcomplex u0[64][64][64];
/*[]*/
static dcomplex u1[64][64][64];
/*[]*/
static dcomplex u2[64][64][64];
/*[]*/
static int indexmap[64][64][64];
/*[]*/
int iter;
/*[]*/
int nthreads = 1;
/*[]*/
double total_time;
/*[]*/
double mflops;
/*[]*/
boolean verified;
/*[]*/
char class;
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 0; i < 7; i++) {
/*[]*/
/*[]*/
timer_clear(i);
/*[]*/
}
/*[]*/
setup();
/*[]*/
/*[]*/
int ( *_imopVarPre145 );
/*[]*/
_imopVarPre145 = dims[2];
/*[]*/
int *d_imopVarPre86;
/*[]*/
d_imopVarPre86 = _imopVarPre145;
/*[]*/
int i_imopVarPre87;
/*[]*/
int j_imopVarPre88;
/*[]*/
int k_imopVarPre89;
/*[]*/
int ii_imopVarPre90;
/*[]*/
int ii2_imopVarPre91;
/*[]*/
int jj_imopVarPre92;
/*[]*/
int ij2_imopVarPre93;
/*[]*/
int kk_imopVarPre94;
/*[]*/
double ap_imopVarPre95;
/*[1]*/
#pragma omp parallel default(shared) private(i_imopVarPre87, j_imopVarPre88, k_imopVarPre89, ii_imopVarPre90, ii2_imopVarPre91, jj_imopVarPre92, ij2_imopVarPre93, kk_imopVarPre94)
{
/*[1]*/
/*[1]*/
#pragma omp for nowait
/*[1]*/
/*[1]*/
/*[1]*/
for (i_imopVarPre87 = 0; i_imopVarPre87 < dims[2][0]; i_imopVarPre87++) {
/*[1]*/
/*[1]*/
ii_imopVarPre90 = (i_imopVarPre87 + 1 + xstart[2] - 2 + 64 / 2) % 64 - 64 / 2;
/*[1]*/
ii2_imopVarPre91 = ii_imopVarPre90 * ii_imopVarPre90;
/*[1]*/
/*[1]*/
/*[1]*/
/*[1]*/
for (j_imopVarPre88 = 0; j_imopVarPre88 < dims[2][1]; j_imopVarPre88++) {
/*[1]*/
/*[1]*/
jj_imopVarPre92 = (j_imopVarPre88 + 1 + ystart[2] - 2 + 64 / 2) % 64 - 64 / 2;
/*[1]*/
ij2_imopVarPre93 = jj_imopVarPre92 * jj_imopVarPre92 + ii2_imopVarPre91;
/*[1]*/
/*[1]*/
/*[1]*/
/*[1]*/
for (k_imopVarPre89 = 0; k_imopVarPre89 < dims[2][2]; k_imopVarPre89++) {
/*[1]*/
/*[1]*/
kk_imopVarPre94 = (k_imopVarPre89 + 1 + zstart[2] - 2 + 64 / 2) % 64 - 64 / 2;
/*[1]*/
indexmap[k_imopVarPre89][j_imopVarPre88][i_imopVarPre87] = kk_imopVarPre94 * kk_imopVarPre94 + ij2_imopVarPre93;
}
}
}
}
/*[1]*/
ap_imopVarPre95 = -4.0 * 1.0e-6 * 3.141592653589793238 * 3.141592653589793238;
/*[1]*/
ex[0] = 1.0;
/*[1]*/
double _imopVarPre217_imopVarPre96;
/*[1]*/
_imopVarPre217_imopVarPre96 = exp(ap_imopVarPre95);
/*[1]*/
/*[1]*/
ex[1] = _imopVarPre217_imopVarPre96;
/*[1]*/
/*[1]*/
/*[1]*/
/*[1]*/
for (i_imopVarPre87 = 2; i_imopVarPre87 <= (6 * (64 * 64 / 4 + 64 * 64 / 4 + 64 * 64 / 4)); i_imopVarPre87++) {
/*[1]*/
/*[1]*/
ex[i_imopVarPre87] = ex[i_imopVarPre87 - 1] * ex[1];
}
/*[]*/
int ( *_imopVarPre147 );
/*[]*/
_imopVarPre147 = dims[0];
/*[]*/
compute_initial_conditions(u1, _imopVarPre147);
/*[]*/
/*[]*/
int _imopVarPre149;
/*[]*/
_imopVarPre149 = dims[0][0];
/*[]*/
fft_init(_imopVarPre149);
/*[]*/
/*[]*/
int dir;
/*[]*/
struct stUn_imopVarPre11 ( *x1 )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *x2 )[64][64];
/*[]*/
dir = 1;
/*[]*/
x1 = u1;
/*[]*/
x2 = u0;
/*[]*/
dcomplex y0[64][18];
/*[]*/
dcomplex y1[64][18];
/*[]*/
/*[]*/
if (dir == 1) {
/*[]*/
/*[]*/
int ( *_imopVarPre225 );
/*[]*/
_imopVarPre225 = dims[0];
/*[]*/
int is;
/*[]*/
int *d;
/*[]*/
struct stUn_imopVarPre11 ( *x )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *xout )[64][64];
/*[]*/
is = 1;
/*[]*/
d = _imopVarPre225;
/*[]*/
x = x1;
/*[]*/
xout = x1;
/*[]*/
int logd[3];
/*[2]*/
#pragma omp parallel default(shared) shared(is)
{
/*[2]*/
/*[2]*/
int i;
/*[2]*/
int j;
/*[2]*/
int k;
/*[2]*/
int jj;
/*[2]*/
#pragma omp master
{
/*[2]*/
/*[2]*/
/*[2]*/
/*[2]*/
/*[2]*/
for (i = 0; i < 3; i++) {
/*[2]*/
/*[2]*/
int _imopVarPre243;
/*[2]*/
int _imopVarPre244;
/*[2]*/
_imopVarPre243 = d[i];
/*[2]*/
_imopVarPre244 = ilog2(_imopVarPre243);
/*[2]*/
/*[2]*/
logd[i] = _imopVarPre244;
}
}
/*[2]*/
// #pragma omp dummyFlush BARRIER_START
/*[2]*/
#pragma omp barrier
/*[3]*/
dcomplex y0[64][18];
/*[3]*/
dcomplex y1[64][18];
/*[3]*/
#pragma omp for nowait
/*[3]*/
/*[3]*/
/*[3]*/
for (k = 0; k < d[2]; k++) {
/*[3]*/
/*[3]*/
/*[3]*/
/*[3]*/
/*[3]*/
for (jj = 0; jj <= d[1] - fftblock; jj += fftblock) {
/*[3]*/
/*[3]*/
/*[3]*/
/*[3]*/
/*[3]*/
for (j = 0; j < fftblock; j++) {
/*[3]*/
/*[3]*/
/*[3]*/
/*[3]*/
/*[3]*/
for (i = 0; i < d[0]; i++) {
/*[3]*/
/*[3]*/
y0[i][j].real = x[k][j + jj][i].real;
/*[3]*/
y0[i][j].imag = x[k][j + jj][i].imag;
}
}
/*[3]*/
int _imopVarPre247;
/*[3]*/
int _imopVarPre248;
/*[3]*/
_imopVarPre247 = d[0];
/*[3]*/
_imopVarPre248 = logd[0];
/*[3]*/
cfftz(is, _imopVarPre248, _imopVarPre247, y0, y1);
/*[3]*/
/*[3]*/
/*[3]*/
/*[3]*/
/*[3]*/
for (j = 0; j < fftblock; j++) {
/*[3]*/
/*[3]*/
/*[3]*/
/*[3]*/
/*[3]*/
for (i = 0; i < d[0]; i++) {
/*[3]*/
/*[3]*/
xout[k][j + jj][i].real = y0[i][j].real;
/*[3]*/
xout[k][j + jj][i].imag = y0[i][j].imag;
}
}
}
}
}
/*[]*/
int ( *_imopVarPre227 );
/*[]*/
_imopVarPre227 = dims[1];
/*[]*/
int is_imopVarPre76;
/*[]*/
int *d_imopVarPre77;
/*[]*/
struct stUn_imopVarPre11 ( *x_imopVarPre78 )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *xout_imopVarPre79 )[64][64];
/*[]*/
is_imopVarPre76 = 1;
/*[]*/
d_imopVarPre77 = _imopVarPre227;
/*[]*/
x_imopVarPre78 = x1;
/*[]*/
xout_imopVarPre79 = x1;
/*[]*/
int logd_imopVarPre80[3];
/*[4]*/
#pragma omp parallel default(shared) shared(is_imopVarPre76)
{
/*[4]*/
/*[4]*/
int i;
/*[4]*/
int j;
/*[4]*/
int k;
/*[4]*/
int ii;
/*[4]*/
#pragma omp master
{
/*[4]*/
/*[4]*/
/*[4]*/
/*[4]*/
/*[4]*/
for (i = 0; i < 3; i++) {
/*[4]*/
/*[4]*/
int _imopVarPre250;
/*[4]*/
int _imopVarPre251;
/*[4]*/
_imopVarPre250 = d_imopVarPre77[i];
/*[4]*/
_imopVarPre251 = ilog2(_imopVarPre250);
/*[4]*/
/*[4]*/
logd_imopVarPre80[i] = _imopVarPre251;
}
}
/*[4]*/
// #pragma omp dummyFlush BARRIER_START
/*[4]*/
#pragma omp barrier
/*[5]*/
dcomplex y0[64][18];
/*[5]*/
dcomplex y1[64][18];
/*[5]*/
#pragma omp for nowait
/*[5]*/
/*[5]*/
/*[5]*/
for (k = 0; k < d_imopVarPre77[2]; k++) {
/*[5]*/
/*[5]*/
/*[5]*/
/*[5]*/
/*[5]*/
for (ii = 0; ii <= d_imopVarPre77[0] - fftblock; ii += fftblock) {
/*[5]*/
/*[5]*/
/*[5]*/
/*[5]*/
/*[5]*/
for (j = 0; j < d_imopVarPre77[1]; j++) {
/*[5]*/
/*[5]*/
/*[5]*/
/*[5]*/
/*[5]*/
for (i = 0; i < fftblock; i++) {
/*[5]*/
/*[5]*/
y0[j][i].real = x_imopVarPre78[k][j][i + ii].real;
/*[5]*/
y0[j][i].imag = x_imopVarPre78[k][j][i + ii].imag;
}
}
/*[5]*/
int _imopVarPre254;
/*[5]*/
int _imopVarPre255;
/*[5]*/
_imopVarPre254 = d_imopVarPre77[1];
/*[5]*/
_imopVarPre255 = logd_imopVarPre80[1];
/*[5]*/
cfftz(is_imopVarPre76, _imopVarPre255, _imopVarPre254, y0, y1);
/*[5]*/
/*[5]*/
/*[5]*/
/*[5]*/
/*[5]*/
for (j = 0; j < d_imopVarPre77[1]; j++) {
/*[5]*/
/*[5]*/
/*[5]*/
/*[5]*/
/*[5]*/
for (i = 0; i < fftblock; i++) {
/*[5]*/
/*[5]*/
xout_imopVarPre79[k][j][i + ii].real = y0[j][i].real;
/*[5]*/
xout_imopVarPre79[k][j][i + ii].imag = y0[j][i].imag;
}
}
}
}
}
/*[]*/
int ( *_imopVarPre229 );
/*[]*/
_imopVarPre229 = dims[2];
/*[]*/
int is_imopVarPre81;
/*[]*/
int *d_imopVarPre82;
/*[]*/
struct stUn_imopVarPre11 ( *x_imopVarPre83 )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *xout_imopVarPre84 )[64][64];
/*[]*/
is_imopVarPre81 = 1;
/*[]*/
d_imopVarPre82 = _imopVarPre229;
/*[]*/
x_imopVarPre83 = x1;
/*[]*/
xout_imopVarPre84 = x2;
/*[]*/
int logd_imopVarPre85[3];
/*[6]*/
#pragma omp parallel default(shared) shared(is_imopVarPre81)
{
/*[6]*/
/*[6]*/
int i;
/*[6]*/
int j;
/*[6]*/
int k;
/*[6]*/
int ii;
/*[6]*/
#pragma omp master
{
/*[6]*/
/*[6]*/
/*[6]*/
/*[6]*/
/*[6]*/
for (i = 0; i < 3; i++) {
/*[6]*/
/*[6]*/
int _imopVarPre257;
/*[6]*/
int _imopVarPre258;
/*[6]*/
_imopVarPre257 = d_imopVarPre82[i];
/*[6]*/
_imopVarPre258 = ilog2(_imopVarPre257);
/*[6]*/
/*[6]*/
logd_imopVarPre85[i] = _imopVarPre258;
}
}
/*[6]*/
// #pragma omp dummyFlush BARRIER_START
/*[6]*/
#pragma omp barrier
/*[7]*/
dcomplex y0[64][18];
/*[7]*/
dcomplex y1[64][18];
/*[7]*/
#pragma omp for nowait
/*[7]*/
/*[7]*/
/*[7]*/
for (j = 0; j < d_imopVarPre82[1]; j++) {
/*[7]*/
/*[7]*/
/*[7]*/
/*[7]*/
/*[7]*/
for (ii = 0; ii <= d_imopVarPre82[0] - fftblock; ii += fftblock) {
/*[7]*/
/*[7]*/
/*[7]*/
/*[7]*/
/*[7]*/
for (k = 0; k < d_imopVarPre82[2]; k++) {
/*[7]*/
/*[7]*/
/*[7]*/
/*[7]*/
/*[7]*/
for (i = 0; i < fftblock; i++) {
/*[7]*/
/*[7]*/
y0[k][i].real = x_imopVarPre83[k][j][i + ii].real;
/*[7]*/
y0[k][i].imag = x_imopVarPre83[k][j][i + ii].imag;
}
}
/*[7]*/
int _imopVarPre261;
/*[7]*/
int _imopVarPre262;
/*[7]*/
_imopVarPre261 = d_imopVarPre82[2];
/*[7]*/
_imopVarPre262 = logd_imopVarPre85[2];
/*[7]*/
cfftz(is_imopVarPre81, _imopVarPre262, _imopVarPre261, y0, y1);
/*[7]*/
/*[7]*/
/*[7]*/
/*[7]*/
/*[7]*/
for (k = 0; k < d_imopVarPre82[2]; k++) {
/*[7]*/
/*[7]*/
/*[7]*/
/*[7]*/
/*[7]*/
for (i = 0; i < fftblock; i++) {
/*[7]*/
/*[7]*/
xout_imopVarPre84[k][j][i + ii].real = y0[k][i].real;
/*[7]*/
xout_imopVarPre84[k][j][i + ii].imag = y0[k][i].imag;
}
}
}
}
}
} else {
/*[]*/
/*[]*/
int ( *_imopVarPre232 );
/*[]*/
int _imopVarPre233;
/*[]*/
_imopVarPre232 = dims[2];
/*[]*/
_imopVarPre233 = -1;
/*[]*/
int is_imopVarPre102;
/*[]*/
int *d_imopVarPre103;
/*[]*/
struct stUn_imopVarPre11 ( *x_imopVarPre104 )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *xout_imopVarPre105 )[64][64];
/*[]*/
is_imopVarPre102 = _imopVarPre233;
/*[]*/
d_imopVarPre103 = _imopVarPre232;
/*[]*/
x_imopVarPre104 = x1;
/*[]*/
xout_imopVarPre105 = x1;
/*[]*/
int logd_imopVarPre106[3];
/*[8]*/
#pragma omp parallel default(shared) shared(is_imopVarPre102)
{
/*[8]*/
/*[8]*/
int i;
/*[8]*/
int j;
/*[8]*/
int k;
/*[8]*/
int ii;
/*[8]*/
#pragma omp master
{
/*[8]*/
/*[8]*/
/*[8]*/
/*[8]*/
/*[8]*/
for (i = 0; i < 3; i++) {
/*[8]*/
/*[8]*/
int _imopVarPre257;
/*[8]*/
int _imopVarPre258;
/*[8]*/
_imopVarPre257 = d_imopVarPre103[i];
/*[8]*/
_imopVarPre258 = ilog2(_imopVarPre257);
/*[8]*/
/*[8]*/
logd_imopVarPre106[i] = _imopVarPre258;
}
}
/*[8]*/
// #pragma omp dummyFlush BARRIER_START
/*[8]*/
#pragma omp barrier
/*[9]*/
dcomplex y0[64][18];
/*[9]*/
dcomplex y1[64][18];
/*[9]*/
#pragma omp for nowait
/*[9]*/
/*[9]*/
/*[9]*/
for (j = 0; j < d_imopVarPre103[1]; j++) {
/*[9]*/
/*[9]*/
/*[9]*/
/*[9]*/
/*[9]*/
for (ii = 0; ii <= d_imopVarPre103[0] - fftblock; ii += fftblock) {
/*[9]*/
/*[9]*/
/*[9]*/
/*[9]*/
/*[9]*/
for (k = 0; k < d_imopVarPre103[2]; k++) {
/*[9]*/
/*[9]*/
/*[9]*/
/*[9]*/
/*[9]*/
for (i = 0; i < fftblock; i++) {
/*[9]*/
/*[9]*/
y0[k][i].real = x_imopVarPre104[k][j][i + ii].real;
/*[9]*/
y0[k][i].imag = x_imopVarPre104[k][j][i + ii].imag;
}
}
/*[9]*/
int _imopVarPre261;
/*[9]*/
int _imopVarPre262;
/*[9]*/
_imopVarPre261 = d_imopVarPre103[2];
/*[9]*/
_imopVarPre262 = logd_imopVarPre106[2];
/*[9]*/
cfftz(is_imopVarPre102, _imopVarPre262, _imopVarPre261, y0, y1);
/*[9]*/
/*[9]*/
/*[9]*/
/*[9]*/
/*[9]*/
for (k = 0; k < d_imopVarPre103[2]; k++) {
/*[9]*/
/*[9]*/
/*[9]*/
/*[9]*/
/*[9]*/
for (i = 0; i < fftblock; i++) {
/*[9]*/
/*[9]*/
xout_imopVarPre105[k][j][i + ii].real = y0[k][i].real;
/*[9]*/
xout_imopVarPre105[k][j][i + ii].imag = y0[k][i].imag;
}
}
}
}
}
/*[]*/
int ( *_imopVarPre236 );
/*[]*/
int _imopVarPre237;
/*[]*/
_imopVarPre236 = dims[1];
/*[]*/
_imopVarPre237 = -1;
/*[]*/
int is;
/*[]*/
int *d;
/*[]*/
struct stUn_imopVarPre11 ( *x )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *xout )[64][64];
/*[]*/
is = _imopVarPre237;
/*[]*/
d = _imopVarPre236;
/*[]*/
x = x1;
/*[]*/
xout = x1;
/*[]*/
int logd[3];
/*[10]*/
#pragma omp parallel default(shared) shared(is)
{
/*[10]*/
/*[10]*/
int i;
/*[10]*/
int j;
/*[10]*/
int k;
/*[10]*/
int ii;
/*[10]*/
#pragma omp master
{
/*[10]*/
/*[10]*/
/*[10]*/
/*[10]*/
/*[10]*/
for (i = 0; i < 3; i++) {
/*[10]*/
/*[10]*/
int _imopVarPre250;
/*[10]*/
int _imopVarPre251;
/*[10]*/
_imopVarPre250 = d[i];
/*[10]*/
_imopVarPre251 = ilog2(_imopVarPre250);
/*[10]*/
/*[10]*/
logd[i] = _imopVarPre251;
}
}
/*[10]*/
// #pragma omp dummyFlush BARRIER_START
/*[10]*/
#pragma omp barrier
/*[11]*/
dcomplex y0[64][18];
/*[11]*/
dcomplex y1[64][18];
/*[11]*/
#pragma omp for nowait
/*[11]*/
/*[11]*/
/*[11]*/
for (k = 0; k < d[2]; k++) {
/*[11]*/
/*[11]*/
/*[11]*/
/*[11]*/
/*[11]*/
for (ii = 0; ii <= d[0] - fftblock; ii += fftblock) {
/*[11]*/
/*[11]*/
/*[11]*/
/*[11]*/
/*[11]*/
for (j = 0; j < d[1]; j++) {
/*[11]*/
/*[11]*/
/*[11]*/
/*[11]*/
/*[11]*/
for (i = 0; i < fftblock; i++) {
/*[11]*/
/*[11]*/
y0[j][i].real = x[k][j][i + ii].real;
/*[11]*/
y0[j][i].imag = x[k][j][i + ii].imag;
}
}
/*[11]*/
int _imopVarPre254;
/*[11]*/
int _imopVarPre255;
/*[11]*/
_imopVarPre254 = d[1];
/*[11]*/
_imopVarPre255 = logd[1];
/*[11]*/
cfftz(is, _imopVarPre255, _imopVarPre254, y0, y1);
/*[11]*/
/*[11]*/
/*[11]*/
/*[11]*/
/*[11]*/
for (j = 0; j < d[1]; j++) {
/*[11]*/
/*[11]*/
/*[11]*/
/*[11]*/
/*[11]*/
for (i = 0; i < fftblock; i++) {
/*[11]*/
/*[11]*/
xout[k][j][i + ii].real = y0[j][i].real;
/*[11]*/
xout[k][j][i + ii].imag = y0[j][i].imag;
}
}
}
}
}
/*[]*/
int ( *_imopVarPre240 );
/*[]*/
int _imopVarPre241;
/*[]*/
_imopVarPre240 = dims[0];
/*[]*/
_imopVarPre241 = -1;
/*[]*/
int is_imopVarPre97;
/*[]*/
int *d_imopVarPre98;
/*[]*/
struct stUn_imopVarPre11 ( *x_imopVarPre99 )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *xout_imopVarPre100 )[64][64];
/*[]*/
is_imopVarPre97 = _imopVarPre241;
/*[]*/
d_imopVarPre98 = _imopVarPre240;
/*[]*/
x_imopVarPre99 = x1;
/*[]*/
xout_imopVarPre100 = x2;
/*[]*/
int logd_imopVarPre101[3];
/*[12]*/
#pragma omp parallel default(shared) shared(is_imopVarPre97)
{
/*[12]*/
/*[12]*/
int i;
/*[12]*/
int j;
/*[12]*/
int k;
/*[12]*/
int jj;
/*[12]*/
#pragma omp master
{
/*[12]*/
/*[12]*/
/*[12]*/
/*[12]*/
/*[12]*/
for (i = 0; i < 3; i++) {
/*[12]*/
/*[12]*/
int _imopVarPre243;
/*[12]*/
int _imopVarPre244;
/*[12]*/
_imopVarPre243 = d_imopVarPre98[i];
/*[12]*/
_imopVarPre244 = ilog2(_imopVarPre243);
/*[12]*/
/*[12]*/
logd_imopVarPre101[i] = _imopVarPre244;
}
}
/*[12]*/
// #pragma omp dummyFlush BARRIER_START
/*[12]*/
#pragma omp barrier
/*[13]*/
dcomplex y0[64][18];
/*[13]*/
dcomplex y1[64][18];
/*[13]*/
#pragma omp for nowait
/*[13]*/
/*[13]*/
/*[13]*/
for (k = 0; k < d_imopVarPre98[2]; k++) {
/*[13]*/
/*[13]*/
/*[13]*/
/*[13]*/
/*[13]*/
for (jj = 0; jj <= d_imopVarPre98[1] - fftblock; jj += fftblock) {
/*[13]*/
/*[13]*/
/*[13]*/
/*[13]*/
/*[13]*/
for (j = 0; j < fftblock; j++) {
/*[13]*/
/*[13]*/
/*[13]*/
/*[13]*/
/*[13]*/
for (i = 0; i < d_imopVarPre98[0]; i++) {
/*[13]*/
/*[13]*/
y0[i][j].real = x_imopVarPre99[k][j + jj][i].real;
/*[13]*/
y0[i][j].imag = x_imopVarPre99[k][j + jj][i].imag;
}
}
/*[13]*/
int _imopVarPre247;
/*[13]*/
int _imopVarPre248;
/*[13]*/
_imopVarPre247 = d_imopVarPre98[0];
/*[13]*/
_imopVarPre248 = logd_imopVarPre101[0];
/*[13]*/
cfftz(is_imopVarPre97, _imopVarPre248, _imopVarPre247, y0, y1);
/*[13]*/
/*[13]*/
/*[13]*/
/*[13]*/
/*[13]*/
for (j = 0; j < fftblock; j++) {
/*[13]*/
/*[13]*/
/*[13]*/
/*[13]*/
/*[13]*/
for (i = 0; i < d_imopVarPre98[0]; i++) {
/*[13]*/
/*[13]*/
xout_imopVarPre100[k][j + jj][i].real = y0[i][j].real;
/*[13]*/
xout_imopVarPre100[k][j + jj][i].imag = y0[i][j].imag;
}
}
}
}
}
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 0; i < 7; i++) {
/*[]*/
/*[]*/
timer_clear(i);
/*[]*/
}
/*[]*/
timer_start(0);
/*[]*/
/*[]*/
/*[]*/
if (0 == 1) {
/*[]*/
/*[]*/
timer_start(1);
/*[]*/
}
/*[]*/
int ( *_imopVarPre151 );
/*[]*/
_imopVarPre151 = dims[2];
/*[]*/
int *d;
/*[]*/
d = _imopVarPre151;
/*[]*/
int i_imopVarPre75;
/*[]*/
int j;
/*[]*/
int k;
/*[]*/
int ii;
/*[]*/
int ii2;
/*[]*/
int jj;
/*[]*/
int ij2;
/*[]*/
int kk;
/*[]*/
double ap;
/*[14]*/
#pragma omp parallel default(shared) private(i_imopVarPre75, j, k, ii, ii2, jj, ij2, kk)
{
/*[14]*/
/*[14]*/
#pragma omp for nowait
/*[14]*/
/*[14]*/
/*[14]*/
for (i_imopVarPre75 = 0; i_imopVarPre75 < dims[2][0]; i_imopVarPre75++) {
/*[14]*/
/*[14]*/
ii = (i_imopVarPre75 + 1 + xstart[2] - 2 + 64 / 2) % 64 - 64 / 2;
/*[14]*/
ii2 = ii * ii;
/*[14]*/
/*[14]*/
/*[14]*/
/*[14]*/
for (j = 0; j < dims[2][1]; j++) {
/*[14]*/
/*[14]*/
jj = (j + 1 + ystart[2] - 2 + 64 / 2) % 64 - 64 / 2;
/*[14]*/
ij2 = jj * jj + ii2;
/*[14]*/
/*[14]*/
/*[14]*/
/*[14]*/
for (k = 0; k < dims[2][2]; k++) {
/*[14]*/
/*[14]*/
kk = (k + 1 + zstart[2] - 2 + 64 / 2) % 64 - 64 / 2;
/*[14]*/
indexmap[k][j][i_imopVarPre75] = kk * kk + ij2;
}
}
}
}
/*[14]*/
ap = -4.0 * 1.0e-6 * 3.141592653589793238 * 3.141592653589793238;
/*[14]*/
ex[0] = 1.0;
/*[14]*/
double _imopVarPre217;
/*[14]*/
_imopVarPre217 = exp(ap);
/*[14]*/
/*[14]*/
ex[1] = _imopVarPre217;
/*[14]*/
/*[14]*/
/*[14]*/
/*[14]*/
for (i_imopVarPre75 = 2; i_imopVarPre75 <= (6 * (64 * 64 / 4 + 64 * 64 / 4 + 64 * 64 / 4)); i_imopVarPre75++) {
/*[14]*/
/*[14]*/
ex[i_imopVarPre75] = ex[i_imopVarPre75 - 1] * ex[1];
}
/*[]*/
int ( *_imopVarPre153 );
/*[]*/
_imopVarPre153 = dims[0];
/*[]*/
compute_initial_conditions(u1, _imopVarPre153);
/*[]*/
/*[]*/
int _imopVarPre155;
/*[]*/
_imopVarPre155 = dims[0][0];
/*[]*/
fft_init(_imopVarPre155);
/*[]*/
/*[]*/
/*[]*/
if (0 == 1) {
/*[]*/
/*[]*/
timer_stop(1);
/*[]*/
}
/*[]*/
/*[]*/
if (0 == 1) {
/*[]*/
/*[]*/
timer_start(2);
/*[]*/
}
/*[]*/
int dir_imopVarPre112;
/*[]*/
struct stUn_imopVarPre11 ( *x1_imopVarPre113 )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *x2_imopVarPre114 )[64][64];
/*[]*/
dir_imopVarPre112 = 1;
/*[]*/
x1_imopVarPre113 = u1;
/*[]*/
x2_imopVarPre114 = u0;
/*[]*/
dcomplex y0_imopVarPre115[64][18];
/*[]*/
dcomplex y1_imopVarPre116[64][18];
/*[]*/
/*[]*/
if (dir_imopVarPre112 == 1) {
/*[]*/
/*[]*/
int ( *_imopVarPre225 );
/*[]*/
_imopVarPre225 = dims[0];
/*[]*/
int is;
/*[]*/
int *d;
/*[]*/
struct stUn_imopVarPre11 ( *x )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *xout )[64][64];
/*[]*/
is = 1;
/*[]*/
d = _imopVarPre225;
/*[]*/
x = x1_imopVarPre113;
/*[]*/
xout = x1_imopVarPre113;
/*[]*/
int logd[3];
/*[15]*/
#pragma omp parallel default(shared) shared(is)
{
/*[15]*/
/*[15]*/
int i;
/*[15]*/
int j;
/*[15]*/
int k;
/*[15]*/
int jj;
/*[15]*/
#pragma omp master
{
/*[15]*/
/*[15]*/
/*[15]*/
/*[15]*/
/*[15]*/
for (i = 0; i < 3; i++) {
/*[15]*/
/*[15]*/
int _imopVarPre243;
/*[15]*/
int _imopVarPre244;
/*[15]*/
_imopVarPre243 = d[i];
/*[15]*/
_imopVarPre244 = ilog2(_imopVarPre243);
/*[15]*/
/*[15]*/
logd[i] = _imopVarPre244;
}
}
/*[15]*/
// #pragma omp dummyFlush BARRIER_START
/*[15]*/
#pragma omp barrier
/*[16]*/
dcomplex y0[64][18];
/*[16]*/
dcomplex y1[64][18];
/*[16]*/
#pragma omp for nowait
/*[16]*/
/*[16]*/
/*[16]*/
for (k = 0; k < d[2]; k++) {
/*[16]*/
/*[16]*/
/*[16]*/
/*[16]*/
/*[16]*/
for (jj = 0; jj <= d[1] - fftblock; jj += fftblock) {
/*[16]*/
/*[16]*/
/*[16]*/
/*[16]*/
/*[16]*/
for (j = 0; j < fftblock; j++) {
/*[16]*/
/*[16]*/
/*[16]*/
/*[16]*/
/*[16]*/
for (i = 0; i < d[0]; i++) {
/*[16]*/
/*[16]*/
y0[i][j].real = x[k][j + jj][i].real;
/*[16]*/
y0[i][j].imag = x[k][j + jj][i].imag;
}
}
/*[16]*/
int _imopVarPre247;
/*[16]*/
int _imopVarPre248;
/*[16]*/
_imopVarPre247 = d[0];
/*[16]*/
_imopVarPre248 = logd[0];
/*[16]*/
cfftz(is, _imopVarPre248, _imopVarPre247, y0, y1);
/*[16]*/
/*[16]*/
/*[16]*/
/*[16]*/
/*[16]*/
for (j = 0; j < fftblock; j++) {
/*[16]*/
/*[16]*/
/*[16]*/
/*[16]*/
/*[16]*/
for (i = 0; i < d[0]; i++) {
/*[16]*/
/*[16]*/
xout[k][j + jj][i].real = y0[i][j].real;
/*[16]*/
xout[k][j + jj][i].imag = y0[i][j].imag;
}
}
}
}
}
/*[]*/
int ( *_imopVarPre227 );
/*[]*/
_imopVarPre227 = dims[1];
/*[]*/
int is_imopVarPre76;
/*[]*/
int *d_imopVarPre77;
/*[]*/
struct stUn_imopVarPre11 ( *x_imopVarPre78 )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *xout_imopVarPre79 )[64][64];
/*[]*/
is_imopVarPre76 = 1;
/*[]*/
d_imopVarPre77 = _imopVarPre227;
/*[]*/
x_imopVarPre78 = x1_imopVarPre113;
/*[]*/
xout_imopVarPre79 = x1_imopVarPre113;
/*[]*/
int logd_imopVarPre80[3];
/*[17]*/
#pragma omp parallel default(shared) shared(is_imopVarPre76)
{
/*[17]*/
/*[17]*/
int i;
/*[17]*/
int j;
/*[17]*/
int k;
/*[17]*/
int ii;
/*[17]*/
#pragma omp master
{
/*[17]*/
/*[17]*/
/*[17]*/
/*[17]*/
/*[17]*/
for (i = 0; i < 3; i++) {
/*[17]*/
/*[17]*/
int _imopVarPre250;
/*[17]*/
int _imopVarPre251;
/*[17]*/
_imopVarPre250 = d_imopVarPre77[i];
/*[17]*/
_imopVarPre251 = ilog2(_imopVarPre250);
/*[17]*/
/*[17]*/
logd_imopVarPre80[i] = _imopVarPre251;
}
}
/*[17]*/
// #pragma omp dummyFlush BARRIER_START
/*[17]*/
#pragma omp barrier
/*[18]*/
dcomplex y0[64][18];
/*[18]*/
dcomplex y1[64][18];
/*[18]*/
#pragma omp for nowait
/*[18]*/
/*[18]*/
/*[18]*/
for (k = 0; k < d_imopVarPre77[2]; k++) {
/*[18]*/
/*[18]*/
/*[18]*/
/*[18]*/
/*[18]*/
for (ii = 0; ii <= d_imopVarPre77[0] - fftblock; ii += fftblock) {
/*[18]*/
/*[18]*/
/*[18]*/
/*[18]*/
/*[18]*/
for (j = 0; j < d_imopVarPre77[1]; j++) {
/*[18]*/
/*[18]*/
/*[18]*/
/*[18]*/
/*[18]*/
for (i = 0; i < fftblock; i++) {
/*[18]*/
/*[18]*/
y0[j][i].real = x_imopVarPre78[k][j][i + ii].real;
/*[18]*/
y0[j][i].imag = x_imopVarPre78[k][j][i + ii].imag;
}
}
/*[18]*/
int _imopVarPre254;
/*[18]*/
int _imopVarPre255;
/*[18]*/
_imopVarPre254 = d_imopVarPre77[1];
/*[18]*/
_imopVarPre255 = logd_imopVarPre80[1];
/*[18]*/
cfftz(is_imopVarPre76, _imopVarPre255, _imopVarPre254, y0, y1);
/*[18]*/
/*[18]*/
/*[18]*/
/*[18]*/
/*[18]*/
for (j = 0; j < d_imopVarPre77[1]; j++) {
/*[18]*/
/*[18]*/
/*[18]*/
/*[18]*/
/*[18]*/
for (i = 0; i < fftblock; i++) {
/*[18]*/
/*[18]*/
xout_imopVarPre79[k][j][i + ii].real = y0[j][i].real;
/*[18]*/
xout_imopVarPre79[k][j][i + ii].imag = y0[j][i].imag;
}
}
}
}
}
/*[]*/
int ( *_imopVarPre229 );
/*[]*/
_imopVarPre229 = dims[2];
/*[]*/
int is_imopVarPre81;
/*[]*/
int *d_imopVarPre82;
/*[]*/
struct stUn_imopVarPre11 ( *x_imopVarPre83 )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *xout_imopVarPre84 )[64][64];
/*[]*/
is_imopVarPre81 = 1;
/*[]*/
d_imopVarPre82 = _imopVarPre229;
/*[]*/
x_imopVarPre83 = x1_imopVarPre113;
/*[]*/
xout_imopVarPre84 = x2_imopVarPre114;
/*[]*/
int logd_imopVarPre85[3];
/*[19]*/
#pragma omp parallel default(shared) shared(is_imopVarPre81)
{
/*[19]*/
/*[19]*/
int i;
/*[19]*/
int j;
/*[19]*/
int k;
/*[19]*/
int ii;
/*[19]*/
#pragma omp master
{
/*[19]*/
/*[19]*/
/*[19]*/
/*[19]*/
/*[19]*/
for (i = 0; i < 3; i++) {
/*[19]*/
/*[19]*/
int _imopVarPre257;
/*[19]*/
int _imopVarPre258;
/*[19]*/
_imopVarPre257 = d_imopVarPre82[i];
/*[19]*/
_imopVarPre258 = ilog2(_imopVarPre257);
/*[19]*/
/*[19]*/
logd_imopVarPre85[i] = _imopVarPre258;
}
}
/*[19]*/
// #pragma omp dummyFlush BARRIER_START
/*[19]*/
#pragma omp barrier
/*[20]*/
dcomplex y0[64][18];
/*[20]*/
dcomplex y1[64][18];
/*[20]*/
#pragma omp for nowait
/*[20]*/
/*[20]*/
/*[20]*/
for (j = 0; j < d_imopVarPre82[1]; j++) {
/*[20]*/
/*[20]*/
/*[20]*/
/*[20]*/
/*[20]*/
for (ii = 0; ii <= d_imopVarPre82[0] - fftblock; ii += fftblock) {
/*[20]*/
/*[20]*/
/*[20]*/
/*[20]*/
/*[20]*/
for (k = 0; k < d_imopVarPre82[2]; k++) {
/*[20]*/
/*[20]*/
/*[20]*/
/*[20]*/
/*[20]*/
for (i = 0; i < fftblock; i++) {
/*[20]*/
/*[20]*/
y0[k][i].real = x_imopVarPre83[k][j][i + ii].real;
/*[20]*/
y0[k][i].imag = x_imopVarPre83[k][j][i + ii].imag;
}
}
/*[20]*/
int _imopVarPre261;
/*[20]*/
int _imopVarPre262;
/*[20]*/
_imopVarPre261 = d_imopVarPre82[2];
/*[20]*/
_imopVarPre262 = logd_imopVarPre85[2];
/*[20]*/
cfftz(is_imopVarPre81, _imopVarPre262, _imopVarPre261, y0, y1);
/*[20]*/
/*[20]*/
/*[20]*/
/*[20]*/
/*[20]*/
for (k = 0; k < d_imopVarPre82[2]; k++) {
/*[20]*/
/*[20]*/
/*[20]*/
/*[20]*/
/*[20]*/
for (i = 0; i < fftblock; i++) {
/*[20]*/
/*[20]*/
xout_imopVarPre84[k][j][i + ii].real = y0[k][i].real;
/*[20]*/
xout_imopVarPre84[k][j][i + ii].imag = y0[k][i].imag;
}
}
}
}
}
} else {
/*[]*/
/*[]*/
int ( *_imopVarPre232 );
/*[]*/
int _imopVarPre233;
/*[]*/
_imopVarPre232 = dims[2];
/*[]*/
_imopVarPre233 = -1;
/*[]*/
int is_imopVarPre102;
/*[]*/
int *d_imopVarPre103;
/*[]*/
struct stUn_imopVarPre11 ( *x_imopVarPre104 )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *xout_imopVarPre105 )[64][64];
/*[]*/
is_imopVarPre102 = _imopVarPre233;
/*[]*/
d_imopVarPre103 = _imopVarPre232;
/*[]*/
x_imopVarPre104 = x1_imopVarPre113;
/*[]*/
xout_imopVarPre105 = x1_imopVarPre113;
/*[]*/
int logd_imopVarPre106[3];
/*[21]*/
#pragma omp parallel default(shared) shared(is_imopVarPre102)
{
/*[21]*/
/*[21]*/
int i;
/*[21]*/
int j;
/*[21]*/
int k;
/*[21]*/
int ii;
/*[21]*/
#pragma omp master
{
/*[21]*/
/*[21]*/
/*[21]*/
/*[21]*/
/*[21]*/
for (i = 0; i < 3; i++) {
/*[21]*/
/*[21]*/
int _imopVarPre257;
/*[21]*/
int _imopVarPre258;
/*[21]*/
_imopVarPre257 = d_imopVarPre103[i];
/*[21]*/
_imopVarPre258 = ilog2(_imopVarPre257);
/*[21]*/
/*[21]*/
logd_imopVarPre106[i] = _imopVarPre258;
}
}
/*[21]*/
// #pragma omp dummyFlush BARRIER_START
/*[21]*/
#pragma omp barrier
/*[22]*/
dcomplex y0[64][18];
/*[22]*/
dcomplex y1[64][18];
/*[22]*/
#pragma omp for nowait
/*[22]*/
/*[22]*/
/*[22]*/
for (j = 0; j < d_imopVarPre103[1]; j++) {
/*[22]*/
/*[22]*/
/*[22]*/
/*[22]*/
/*[22]*/
for (ii = 0; ii <= d_imopVarPre103[0] - fftblock; ii += fftblock) {
/*[22]*/
/*[22]*/
/*[22]*/
/*[22]*/
/*[22]*/
for (k = 0; k < d_imopVarPre103[2]; k++) {
/*[22]*/
/*[22]*/
/*[22]*/
/*[22]*/
/*[22]*/
for (i = 0; i < fftblock; i++) {
/*[22]*/
/*[22]*/
y0[k][i].real = x_imopVarPre104[k][j][i + ii].real;
/*[22]*/
y0[k][i].imag = x_imopVarPre104[k][j][i + ii].imag;
}
}
/*[22]*/
int _imopVarPre261;
/*[22]*/
int _imopVarPre262;
/*[22]*/
_imopVarPre261 = d_imopVarPre103[2];
/*[22]*/
_imopVarPre262 = logd_imopVarPre106[2];
/*[22]*/
cfftz(is_imopVarPre102, _imopVarPre262, _imopVarPre261, y0, y1);
/*[22]*/
/*[22]*/
/*[22]*/
/*[22]*/
/*[22]*/
for (k = 0; k < d_imopVarPre103[2]; k++) {
/*[22]*/
/*[22]*/
/*[22]*/
/*[22]*/
/*[22]*/
for (i = 0; i < fftblock; i++) {
/*[22]*/
/*[22]*/
xout_imopVarPre105[k][j][i + ii].real = y0[k][i].real;
/*[22]*/
xout_imopVarPre105[k][j][i + ii].imag = y0[k][i].imag;
}
}
}
}
}
/*[]*/
int ( *_imopVarPre236 );
/*[]*/
int _imopVarPre237;
/*[]*/
_imopVarPre236 = dims[1];
/*[]*/
_imopVarPre237 = -1;
/*[]*/
int is;
/*[]*/
int *d;
/*[]*/
struct stUn_imopVarPre11 ( *x )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *xout )[64][64];
/*[]*/
is = _imopVarPre237;
/*[]*/
d = _imopVarPre236;
/*[]*/
x = x1_imopVarPre113;
/*[]*/
xout = x1_imopVarPre113;
/*[]*/
int logd[3];
/*[23]*/
#pragma omp parallel default(shared) shared(is)
{
/*[23]*/
/*[23]*/
int i;
/*[23]*/
int j;
/*[23]*/
int k;
/*[23]*/
int ii;
/*[23]*/
#pragma omp master
{
/*[23]*/
/*[23]*/
/*[23]*/
/*[23]*/
/*[23]*/
for (i = 0; i < 3; i++) {
/*[23]*/
/*[23]*/
int _imopVarPre250;
/*[23]*/
int _imopVarPre251;
/*[23]*/
_imopVarPre250 = d[i];
/*[23]*/
_imopVarPre251 = ilog2(_imopVarPre250);
/*[23]*/
/*[23]*/
logd[i] = _imopVarPre251;
}
}
/*[23]*/
// #pragma omp dummyFlush BARRIER_START
/*[23]*/
#pragma omp barrier
/*[24]*/
dcomplex y0[64][18];
/*[24]*/
dcomplex y1[64][18];
/*[24]*/
#pragma omp for nowait
/*[24]*/
/*[24]*/
/*[24]*/
for (k = 0; k < d[2]; k++) {
/*[24]*/
/*[24]*/
/*[24]*/
/*[24]*/
/*[24]*/
for (ii = 0; ii <= d[0] - fftblock; ii += fftblock) {
/*[24]*/
/*[24]*/
/*[24]*/
/*[24]*/
/*[24]*/
for (j = 0; j < d[1]; j++) {
/*[24]*/
/*[24]*/
/*[24]*/
/*[24]*/
/*[24]*/
for (i = 0; i < fftblock; i++) {
/*[24]*/
/*[24]*/
y0[j][i].real = x[k][j][i + ii].real;
/*[24]*/
y0[j][i].imag = x[k][j][i + ii].imag;
}
}
/*[24]*/
int _imopVarPre254;
/*[24]*/
int _imopVarPre255;
/*[24]*/
_imopVarPre254 = d[1];
/*[24]*/
_imopVarPre255 = logd[1];
/*[24]*/
cfftz(is, _imopVarPre255, _imopVarPre254, y0, y1);
/*[24]*/
/*[24]*/
/*[24]*/
/*[24]*/
/*[24]*/
for (j = 0; j < d[1]; j++) {
/*[24]*/
/*[24]*/
/*[24]*/
/*[24]*/
/*[24]*/
for (i = 0; i < fftblock; i++) {
/*[24]*/
/*[24]*/
xout[k][j][i + ii].real = y0[j][i].real;
/*[24]*/
xout[k][j][i + ii].imag = y0[j][i].imag;
}
}
}
}
}
/*[]*/
int ( *_imopVarPre240 );
/*[]*/
int _imopVarPre241;
/*[]*/
_imopVarPre240 = dims[0];
/*[]*/
_imopVarPre241 = -1;
/*[]*/
int is_imopVarPre97;
/*[]*/
int *d_imopVarPre98;
/*[]*/
struct stUn_imopVarPre11 ( *x_imopVarPre99 )[64][64];
/*[]*/
struct stUn_imopVarPre11 ( *xout_imopVarPre100 )[64][64];
/*[]*/
is_imopVarPre97 = _imopVarPre241;
/*[]*/
d_imopVarPre98 = _imopVarPre240;
/*[]*/
x_imopVarPre99 = x1_imopVarPre113;
/*[]*/
xout_imopVarPre100 = x2_imopVarPre114;
/*[]*/
int logd_imopVarPre101[3];
/*[25]*/
#pragma omp parallel default(shared) shared(is_imopVarPre97)
{
/*[25]*/
/*[25]*/
int i;
/*[25]*/
int j;
/*[25]*/
int k;
/*[25]*/
int jj;
/*[25]*/
#pragma omp master
{
/*[25]*/
/*[25]*/
/*[25]*/
/*[25]*/
/*[25]*/
for (i = 0; i < 3; i++) {
/*[25]*/
/*[25]*/
int _imopVarPre243;
/*[25]*/
int _imopVarPre244;
/*[25]*/
_imopVarPre243 = d_imopVarPre98[i];
/*[25]*/
_imopVarPre244 = ilog2(_imopVarPre243);
/*[25]*/
/*[25]*/
logd_imopVarPre101[i] = _imopVarPre244;
}
}
/*[25]*/
// #pragma omp dummyFlush BARRIER_START
/*[25]*/
#pragma omp barrier
/*[26]*/
dcomplex y0[64][18];
/*[26]*/
dcomplex y1[64][18];
/*[26]*/
#pragma omp for nowait
/*[26]*/
/*[26]*/
/*[26]*/
for (k = 0; k < d_imopVarPre98[2]; k++) {
/*[26]*/
/*[26]*/
/*[26]*/
/*[26]*/
/*[26]*/
for (jj = 0; jj <= d_imopVarPre98[1] - fftblock; jj += fftblock) {
/*[26]*/
/*[26]*/
/*[26]*/
/*[26]*/
/*[26]*/
for (j = 0; j < fftblock; j++) {
/*[26]*/
/*[26]*/
/*[26]*/
/*[26]*/
/*[26]*/
for (i = 0; i < d_imopVarPre98[0]; i++) {
/*[26]*/
/*[26]*/
y0[i][j].real = x_imopVarPre99[k][j + jj][i].real;
/*[26]*/
y0[i][j].imag = x_imopVarPre99[k][j + jj][i].imag;
}
}
/*[26]*/
int _imopVarPre247;
/*[26]*/
int _imopVarPre248;
/*[26]*/
_imopVarPre247 = d_imopVarPre98[0];
/*[26]*/
_imopVarPre248 = logd_imopVarPre101[0];
/*[26]*/
cfftz(is_imopVarPre97, _imopVarPre248, _imopVarPre247, y0, y1);
/*[26]*/
/*[26]*/
/*[26]*/
/*[26]*/
/*[26]*/
for (j = 0; j < fftblock; j++) {
/*[26]*/
/*[26]*/
/*[26]*/
/*[26]*/
/*[26]*/
for (i = 0; i < d_imopVarPre98[0]; i++) {
/*[26]*/
/*[26]*/
xout_imopVarPre100[k][j + jj][i].real = y0[i][j].real;
/*[26]*/
xout_imopVarPre100[k][j + jj][i].imag = y0[i][j].imag;
}
}
}
}
}
}
/*[]*/
/*[]*/
if (0 == 1) {
/*[]*/
/*[]*/
timer_stop(2);
/*[]*/
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (iter = 1; iter <= niter; iter++) {
/*[]*/
/*[]*/
int _imopVarPre159;
/*[]*/
int ( *_imopVarPre161 );
/*[]*/
int ( *_imopVarPre157 );
/*[27]*/
#pragma omp parallel
{
/*[27]*/
/*[27]*/
#pragma omp master
{
/*[27]*/
/*[27]*/
/*[27]*/
if (0 == 1) {
/*[27]*/
/*[27]*/
timer_start(3);
/*[27]*/
}
/*[27]*/
_imopVarPre157 = dims[0];
}
/*[27]*/
// #pragma omp dummyFlush BARRIER_START
/*[27]*/
#pragma omp barrier
/*[28]*/
evolve(u0, u1, iter, indexmap, _imopVarPre157);
/*[28]*/
/*[28]*/
#pragma omp master
{
/*[28]*/
/*[28]*/
/*[28]*/
if (0 == 1) {
/*[28]*/
/*[28]*/
timer_stop(3);
/*[28]*/
}
/*[28]*/
/*[28]*/
if (0 == 1) {
/*[28]*/
/*[28]*/
timer_start(2);
/*[28]*/
}
/*[28]*/
_imopVarPre159 = -1;
}
}
/*[28]*/
int dir_imopVarPre107;
/*[28]*/
struct stUn_imopVarPre11 ( *x1_imopVarPre108 )[64][64];
/*[28]*/
struct stUn_imopVarPre11 ( *x2_imopVarPre109 )[64][64];
/*[28]*/
dir_imopVarPre107 = _imopVarPre159;
/*[28]*/
x1_imopVarPre108 = u1;
/*[28]*/
x2_imopVarPre109 = u2;
/*[28]*/
dcomplex y0_imopVarPre110[64][18];
/*[28]*/
dcomplex y1_imopVarPre111[64][18];
/*[28]*/
/*[28]*/
if (dir_imopVarPre107 == 1) {
/*[28]*/
/*[28]*/
int ( *_imopVarPre225 );
/*[28]*/
_imopVarPre225 = dims[0];
/*[28]*/
int is;
/*[28]*/
int *d;
/*[28]*/
struct stUn_imopVarPre11 ( *x )[64][64];
/*[28]*/
struct stUn_imopVarPre11 ( *xout )[64][64];
/*[28]*/
is = 1;
/*[28]*/
d = _imopVarPre225;
/*[28]*/
x = x1_imopVarPre108;
/*[28]*/
xout = x1_imopVarPre108;
/*[28]*/
int logd[3];
/*[28, 29]*/
#pragma omp parallel default(shared) shared(is)
{
/*[28, 29]*/
/*[28, 29]*/
int i;
/*[28, 29]*/
int j;
/*[28, 29]*/
int k;
/*[28, 29]*/
int jj;
/*[28, 29]*/
#pragma omp master
{
/*[28, 29]*/
/*[28, 29]*/
/*[28, 29]*/
/*[28, 29]*/
/*[28, 29]*/
for (i = 0; i < 3; i++) {
/*[28, 29]*/
/*[28, 29]*/
int _imopVarPre243;
/*[28, 29]*/
int _imopVarPre244;
/*[28, 29]*/
_imopVarPre243 = d[i];
/*[28, 29]*/
_imopVarPre244 = ilog2(_imopVarPre243);
/*[28, 29]*/
/*[28, 29]*/
logd[i] = _imopVarPre244;
}
}
/*[28, 29]*/
// #pragma omp dummyFlush BARRIER_START
/*[28, 29]*/
#pragma omp barrier
/*[28, 30]*/
dcomplex y0[64][18];
/*[28, 30]*/
dcomplex y1[64][18];
/*[28, 30]*/
#pragma omp for nowait
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
for (k = 0; k < d[2]; k++) {
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
for (jj = 0; jj <= d[1] - fftblock; jj += fftblock) {
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
for (j = 0; j < fftblock; j++) {
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
for (i = 0; i < d[0]; i++) {
/*[28, 30]*/
/*[28, 30]*/
y0[i][j].real = x[k][j + jj][i].real;
/*[28, 30]*/
y0[i][j].imag = x[k][j + jj][i].imag;
}
}
/*[28, 30]*/
int _imopVarPre247;
/*[28, 30]*/
int _imopVarPre248;
/*[28, 30]*/
_imopVarPre247 = d[0];
/*[28, 30]*/
_imopVarPre248 = logd[0];
/*[28, 30]*/
cfftz(is, _imopVarPre248, _imopVarPre247, y0, y1);
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
for (j = 0; j < fftblock; j++) {
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
/*[28, 30]*/
for (i = 0; i < d[0]; i++) {
/*[28, 30]*/
/*[28, 30]*/
xout[k][j + jj][i].real = y0[i][j].real;
/*[28, 30]*/
xout[k][j + jj][i].imag = y0[i][j].imag;
}
}
}
}
}
/*[28]*/
int ( *_imopVarPre227 );
/*[28]*/
_imopVarPre227 = dims[1];
/*[28]*/
int is_imopVarPre76;
/*[28]*/
int *d_imopVarPre77;
/*[28]*/
struct stUn_imopVarPre11 ( *x_imopVarPre78 )[64][64];
/*[28]*/
struct stUn_imopVarPre11 ( *xout_imopVarPre79 )[64][64];
/*[28]*/
is_imopVarPre76 = 1;
/*[28]*/
d_imopVarPre77 = _imopVarPre227;
/*[28]*/
x_imopVarPre78 = x1_imopVarPre108;
/*[28]*/
xout_imopVarPre79 = x1_imopVarPre108;
/*[28]*/
int logd_imopVarPre80[3];
/*[28, 31]*/
#pragma omp parallel default(shared) shared(is_imopVarPre76)
{
/*[28, 31]*/
/*[28, 31]*/
int i;
/*[28, 31]*/
int j;
/*[28, 31]*/
int k;
/*[28, 31]*/
int ii;
/*[28, 31]*/
#pragma omp master
{
/*[28, 31]*/
/*[28, 31]*/
/*[28, 31]*/
/*[28, 31]*/
/*[28, 31]*/
for (i = 0; i < 3; i++) {
/*[28, 31]*/
/*[28, 31]*/
int _imopVarPre250;
/*[28, 31]*/
int _imopVarPre251;
/*[28, 31]*/
_imopVarPre250 = d_imopVarPre77[i];
/*[28, 31]*/
_imopVarPre251 = ilog2(_imopVarPre250);
/*[28, 31]*/
/*[28, 31]*/
logd_imopVarPre80[i] = _imopVarPre251;
}
}
/*[28, 31]*/
// #pragma omp dummyFlush BARRIER_START
/*[28, 31]*/
#pragma omp barrier
/*[28, 32]*/
dcomplex y0[64][18];
/*[28, 32]*/
dcomplex y1[64][18];
/*[28, 32]*/
#pragma omp for nowait
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
for (k = 0; k < d_imopVarPre77[2]; k++) {
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
for (ii = 0; ii <= d_imopVarPre77[0] - fftblock; ii += fftblock) {
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
for (j = 0; j < d_imopVarPre77[1]; j++) {
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
for (i = 0; i < fftblock; i++) {
/*[28, 32]*/
/*[28, 32]*/
y0[j][i].real = x_imopVarPre78[k][j][i + ii].real;
/*[28, 32]*/
y0[j][i].imag = x_imopVarPre78[k][j][i + ii].imag;
}
}
/*[28, 32]*/
int _imopVarPre254;
/*[28, 32]*/
int _imopVarPre255;
/*[28, 32]*/
_imopVarPre254 = d_imopVarPre77[1];
/*[28, 32]*/
_imopVarPre255 = logd_imopVarPre80[1];
/*[28, 32]*/
cfftz(is_imopVarPre76, _imopVarPre255, _imopVarPre254, y0, y1);
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
for (j = 0; j < d_imopVarPre77[1]; j++) {
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
/*[28, 32]*/
for (i = 0; i < fftblock; i++) {
/*[28, 32]*/
/*[28, 32]*/
xout_imopVarPre79[k][j][i + ii].real = y0[j][i].real;
/*[28, 32]*/
xout_imopVarPre79[k][j][i + ii].imag = y0[j][i].imag;
}
}
}
}
}
/*[28]*/
int ( *_imopVarPre229 );
/*[28]*/
_imopVarPre229 = dims[2];
/*[28]*/
int is_imopVarPre81;
/*[28]*/
int *d_imopVarPre82;
/*[28]*/
struct stUn_imopVarPre11 ( *x_imopVarPre83 )[64][64];
/*[28]*/
struct stUn_imopVarPre11 ( *xout_imopVarPre84 )[64][64];
/*[28]*/
is_imopVarPre81 = 1;
/*[28]*/
d_imopVarPre82 = _imopVarPre229;
/*[28]*/
x_imopVarPre83 = x1_imopVarPre108;
/*[28]*/
xout_imopVarPre84 = x2_imopVarPre109;
/*[28]*/
int logd_imopVarPre85[3];
/*[28, 33]*/
#pragma omp parallel default(shared) shared(is_imopVarPre81)
{
/*[28, 33]*/
/*[28, 33]*/
int i;
/*[28, 33]*/
int j;
/*[28, 33]*/
int k;
/*[28, 33]*/
int ii;
/*[28, 33]*/
#pragma omp master
{
/*[28, 33]*/
/*[28, 33]*/
/*[28, 33]*/
/*[28, 33]*/
/*[28, 33]*/
for (i = 0; i < 3; i++) {
/*[28, 33]*/
/*[28, 33]*/
int _imopVarPre257;
/*[28, 33]*/
int _imopVarPre258;
/*[28, 33]*/
_imopVarPre257 = d_imopVarPre82[i];
/*[28, 33]*/
_imopVarPre258 = ilog2(_imopVarPre257);
/*[28, 33]*/
/*[28, 33]*/
logd_imopVarPre85[i] = _imopVarPre258;
}
}
/*[28, 33]*/
// #pragma omp dummyFlush BARRIER_START
/*[28, 33]*/
#pragma omp barrier
/*[28, 34]*/
dcomplex y0[64][18];
/*[28, 34]*/
dcomplex y1[64][18];
/*[28, 34]*/
#pragma omp for nowait
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
for (j = 0; j < d_imopVarPre82[1]; j++) {
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
for (ii = 0; ii <= d_imopVarPre82[0] - fftblock; ii += fftblock) {
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
for (k = 0; k < d_imopVarPre82[2]; k++) {
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
for (i = 0; i < fftblock; i++) {
/*[28, 34]*/
/*[28, 34]*/
y0[k][i].real = x_imopVarPre83[k][j][i + ii].real;
/*[28, 34]*/
y0[k][i].imag = x_imopVarPre83[k][j][i + ii].imag;
}
}
/*[28, 34]*/
int _imopVarPre261;
/*[28, 34]*/
int _imopVarPre262;
/*[28, 34]*/
_imopVarPre261 = d_imopVarPre82[2];
/*[28, 34]*/
_imopVarPre262 = logd_imopVarPre85[2];
/*[28, 34]*/
cfftz(is_imopVarPre81, _imopVarPre262, _imopVarPre261, y0, y1);
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
for (k = 0; k < d_imopVarPre82[2]; k++) {
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
/*[28, 34]*/
for (i = 0; i < fftblock; i++) {
/*[28, 34]*/
/*[28, 34]*/
xout_imopVarPre84[k][j][i + ii].real = y0[k][i].real;
/*[28, 34]*/
xout_imopVarPre84[k][j][i + ii].imag = y0[k][i].imag;
}
}
}
}
}
} else {
/*[28]*/
/*[28]*/
int ( *_imopVarPre232 );
/*[28]*/
int _imopVarPre233;
/*[28]*/
_imopVarPre232 = dims[2];
/*[28]*/
_imopVarPre233 = -1;
/*[28]*/
int is_imopVarPre102;
/*[28]*/
int *d_imopVarPre103;
/*[28]*/
struct stUn_imopVarPre11 ( *x_imopVarPre104 )[64][64];
/*[28]*/
struct stUn_imopVarPre11 ( *xout_imopVarPre105 )[64][64];
/*[28]*/
is_imopVarPre102 = _imopVarPre233;
/*[28]*/
d_imopVarPre103 = _imopVarPre232;
/*[28]*/
x_imopVarPre104 = x1_imopVarPre108;
/*[28]*/
xout_imopVarPre105 = x1_imopVarPre108;
/*[28]*/
int logd_imopVarPre106[3];
/*[28, 35]*/
#pragma omp parallel default(shared) shared(is_imopVarPre102)
{
/*[28, 35]*/
/*[28, 35]*/
int i;
/*[28, 35]*/
int j;
/*[28, 35]*/
int k;
/*[28, 35]*/
int ii;
/*[28, 35]*/
#pragma omp master
{
/*[28, 35]*/
/*[28, 35]*/
/*[28, 35]*/
/*[28, 35]*/
/*[28, 35]*/
for (i = 0; i < 3; i++) {
/*[28, 35]*/
/*[28, 35]*/
int _imopVarPre257;
/*[28, 35]*/
int _imopVarPre258;
/*[28, 35]*/
_imopVarPre257 = d_imopVarPre103[i];
/*[28, 35]*/
_imopVarPre258 = ilog2(_imopVarPre257);
/*[28, 35]*/
/*[28, 35]*/
logd_imopVarPre106[i] = _imopVarPre258;
}
}
/*[28, 35]*/
// #pragma omp dummyFlush BARRIER_START
/*[28, 35]*/
#pragma omp barrier
/*[28, 36]*/
dcomplex y0[64][18];
/*[28, 36]*/
dcomplex y1[64][18];
/*[28, 36]*/
#pragma omp for nowait
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
for (j = 0; j < d_imopVarPre103[1]; j++) {
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
for (ii = 0; ii <= d_imopVarPre103[0] - fftblock; ii += fftblock) {
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
for (k = 0; k < d_imopVarPre103[2]; k++) {
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
for (i = 0; i < fftblock; i++) {
/*[28, 36]*/
/*[28, 36]*/
y0[k][i].real = x_imopVarPre104[k][j][i + ii].real;
/*[28, 36]*/
y0[k][i].imag = x_imopVarPre104[k][j][i + ii].imag;
}
}
/*[28, 36]*/
int _imopVarPre261;
/*[28, 36]*/
int _imopVarPre262;
/*[28, 36]*/
_imopVarPre261 = d_imopVarPre103[2];
/*[28, 36]*/
_imopVarPre262 = logd_imopVarPre106[2];
/*[28, 36]*/
cfftz(is_imopVarPre102, _imopVarPre262, _imopVarPre261, y0, y1);
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
for (k = 0; k < d_imopVarPre103[2]; k++) {
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
/*[28, 36]*/
for (i = 0; i < fftblock; i++) {
/*[28, 36]*/
/*[28, 36]*/
xout_imopVarPre105[k][j][i + ii].real = y0[k][i].real;
/*[28, 36]*/
xout_imopVarPre105[k][j][i + ii].imag = y0[k][i].imag;
}
}
}
}
}
/*[28]*/
int ( *_imopVarPre236 );
/*[28]*/
int _imopVarPre237;
/*[28]*/
_imopVarPre236 = dims[1];
/*[28]*/
_imopVarPre237 = -1;
/*[28]*/
int is;
/*[28]*/
int *d;
/*[28]*/
struct stUn_imopVarPre11 ( *x )[64][64];
/*[28]*/
struct stUn_imopVarPre11 ( *xout )[64][64];
/*[28]*/
is = _imopVarPre237;
/*[28]*/
d = _imopVarPre236;
/*[28]*/
x = x1_imopVarPre108;
/*[28]*/
xout = x1_imopVarPre108;
/*[28]*/
int logd[3];
/*[28, 37]*/
#pragma omp parallel default(shared) shared(is)
{
/*[28, 37]*/
/*[28, 37]*/
int i;
/*[28, 37]*/
int j;
/*[28, 37]*/
int k;
/*[28, 37]*/
int ii;
/*[28, 37]*/
#pragma omp master
{
/*[28, 37]*/
/*[28, 37]*/
/*[28, 37]*/
/*[28, 37]*/
/*[28, 37]*/
for (i = 0; i < 3; i++) {
/*[28, 37]*/
/*[28, 37]*/
int _imopVarPre250;
/*[28, 37]*/
int _imopVarPre251;
/*[28, 37]*/
_imopVarPre250 = d[i];
/*[28, 37]*/
_imopVarPre251 = ilog2(_imopVarPre250);
/*[28, 37]*/
/*[28, 37]*/
logd[i] = _imopVarPre251;
}
}
/*[28, 37]*/
// #pragma omp dummyFlush BARRIER_START
/*[28, 37]*/
#pragma omp barrier
/*[28, 38]*/
dcomplex y0[64][18];
/*[28, 38]*/
dcomplex y1[64][18];
/*[28, 38]*/
#pragma omp for nowait
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
for (k = 0; k < d[2]; k++) {
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
for (ii = 0; ii <= d[0] - fftblock; ii += fftblock) {
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
for (j = 0; j < d[1]; j++) {
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
for (i = 0; i < fftblock; i++) {
/*[28, 38]*/
/*[28, 38]*/
y0[j][i].real = x[k][j][i + ii].real;
/*[28, 38]*/
y0[j][i].imag = x[k][j][i + ii].imag;
}
}
/*[28, 38]*/
int _imopVarPre254;
/*[28, 38]*/
int _imopVarPre255;
/*[28, 38]*/
_imopVarPre254 = d[1];
/*[28, 38]*/
_imopVarPre255 = logd[1];
/*[28, 38]*/
cfftz(is, _imopVarPre255, _imopVarPre254, y0, y1);
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
for (j = 0; j < d[1]; j++) {
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
/*[28, 38]*/
for (i = 0; i < fftblock; i++) {
/*[28, 38]*/
/*[28, 38]*/
xout[k][j][i + ii].real = y0[j][i].real;
/*[28, 38]*/
xout[k][j][i + ii].imag = y0[j][i].imag;
}
}
}
}
}
/*[28]*/
int ( *_imopVarPre240 );
/*[28]*/
int _imopVarPre241;
/*[28]*/
_imopVarPre240 = dims[0];
/*[28]*/
_imopVarPre241 = -1;
/*[28]*/
int is_imopVarPre97;
/*[28]*/
int *d_imopVarPre98;
/*[28]*/
struct stUn_imopVarPre11 ( *x_imopVarPre99 )[64][64];
/*[28]*/
struct stUn_imopVarPre11 ( *xout_imopVarPre100 )[64][64];
/*[28]*/
is_imopVarPre97 = _imopVarPre241;
/*[28]*/
d_imopVarPre98 = _imopVarPre240;
/*[28]*/
x_imopVarPre99 = x1_imopVarPre108;
/*[28]*/
xout_imopVarPre100 = x2_imopVarPre109;
/*[28]*/
int logd_imopVarPre101[3];
/*[28, 39]*/
#pragma omp parallel default(shared) shared(is_imopVarPre97)
{
/*[28, 39]*/
/*[28, 39]*/
int i;
/*[28, 39]*/
int j;
/*[28, 39]*/
int k;
/*[28, 39]*/
int jj;
/*[28, 39]*/
#pragma omp master
{
/*[28, 39]*/
/*[28, 39]*/
/*[28, 39]*/
/*[28, 39]*/
/*[28, 39]*/
for (i = 0; i < 3; i++) {
/*[28, 39]*/
/*[28, 39]*/
int _imopVarPre243;
/*[28, 39]*/
int _imopVarPre244;
/*[28, 39]*/
_imopVarPre243 = d_imopVarPre98[i];
/*[28, 39]*/
_imopVarPre244 = ilog2(_imopVarPre243);
/*[28, 39]*/
/*[28, 39]*/
logd_imopVarPre101[i] = _imopVarPre244;
}
}
/*[28, 39]*/
// #pragma omp dummyFlush BARRIER_START
/*[28, 39]*/
#pragma omp barrier
/*[28, 40]*/
dcomplex y0[64][18];
/*[28, 40]*/
dcomplex y1[64][18];
/*[28, 40]*/
#pragma omp for nowait
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
for (k = 0; k < d_imopVarPre98[2]; k++) {
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
for (jj = 0; jj <= d_imopVarPre98[1] - fftblock; jj += fftblock) {
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
for (j = 0; j < fftblock; j++) {
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
for (i = 0; i < d_imopVarPre98[0]; i++) {
/*[28, 40]*/
/*[28, 40]*/
y0[i][j].real = x_imopVarPre99[k][j + jj][i].real;
/*[28, 40]*/
y0[i][j].imag = x_imopVarPre99[k][j + jj][i].imag;
}
}
/*[28, 40]*/
int _imopVarPre247;
/*[28, 40]*/
int _imopVarPre248;
/*[28, 40]*/
_imopVarPre247 = d_imopVarPre98[0];
/*[28, 40]*/
_imopVarPre248 = logd_imopVarPre101[0];
/*[28, 40]*/
cfftz(is_imopVarPre97, _imopVarPre248, _imopVarPre247, y0, y1);
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
for (j = 0; j < fftblock; j++) {
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
/*[28, 40]*/
for (i = 0; i < d_imopVarPre98[0]; i++) {
/*[28, 40]*/
/*[28, 40]*/
xout_imopVarPre100[k][j + jj][i].real = y0[i][j].real;
/*[28, 40]*/
xout_imopVarPre100[k][j + jj][i].imag = y0[i][j].imag;
}
}
}
}
}
}
/*[41]*/
#pragma omp parallel
{
/*[41]*/
/*[41]*/
#pragma omp master
{
/*[41]*/
/*[41]*/
/*[41]*/
if (0 == 1) {
/*[41]*/
/*[41]*/
timer_stop(2);
/*[41]*/
}
/*[41]*/
/*[41]*/
if (0 == 1) {
/*[41]*/
/*[41]*/
timer_start(4);
/*[41]*/
}
/*[41]*/
_imopVarPre161 = dims[0];
}
/*[41]*/
// #pragma omp dummyFlush BARRIER_START
/*[41]*/
#pragma omp barrier
/*[42]*/
int i_imopVarPre117;
/*[42]*/
struct stUn_imopVarPre11 ( *u1_imopVarPre118 )[64][64];
/*[42]*/
int *d_imopVarPre119;
/*[42]*/
i_imopVarPre117 = iter;
/*[42]*/
u1_imopVarPre118 = u2;
/*[42]*/
d_imopVarPre119 = _imopVarPre161;
/*[42]*/
int j_imopVarPre120;
/*[42]*/
int q;
/*[42]*/
int r;
/*[42]*/
int s;
/*[42]*/
dcomplex chk;
/*[42]*/
chk.real = 0.0;
/*[42]*/
chk.imag = 0.0;
/*[42]*/
#pragma omp for nowait
/*[42]*/
/*[42]*/
/*[42]*/
for (j_imopVarPre120 = 1; j_imopVarPre120 <= 1024; j_imopVarPre120++) {
/*[42]*/
/*[42]*/
q = j_imopVarPre120 % 64 + 1;
/*[42]*/
int _imopVarPre272;
/*[42]*/
_imopVarPre272 = q >= xstart[0];
/*[42]*/
/*[42]*/
if (_imopVarPre272) {
/*[42]*/
/*[42]*/
_imopVarPre272 = q <= xend[0];
}
/*[42]*/
/*[42]*/
if (_imopVarPre272) {
/*[42]*/
/*[42]*/
r = (3 * j_imopVarPre120) % 64 + 1;
/*[42]*/
int _imopVarPre274;
/*[42]*/
_imopVarPre274 = r >= ystart[0];
/*[42]*/
/*[42]*/
if (_imopVarPre274) {
/*[42]*/
/*[42]*/
_imopVarPre274 = r <= yend[0];
}
/*[42]*/
/*[42]*/
if (_imopVarPre274) {
/*[42]*/
/*[42]*/
s = (5 * j_imopVarPre120) % 64 + 1;
/*[42]*/
int _imopVarPre276;
/*[42]*/
_imopVarPre276 = s >= zstart[0];
/*[42]*/
/*[42]*/
if (_imopVarPre276) {
/*[42]*/
/*[42]*/
_imopVarPre276 = s <= zend[0];
}
/*[42]*/
/*[42]*/
if (_imopVarPre276) {
/*[42]*/
/*[42]*/
chk.real = chk.real + u1_imopVarPre118[s - zstart[0]][r - ystart[0]][q - xstart[0]].real;
/*[42]*/
(chk.imag = chk.imag + u1_imopVarPre118[s - zstart[0]][r - ystart[0]][q - xstart[0]].imag);
}
}
}
}
/*[42]*/
// #pragma omp dummyFlush CRITICAL_START
/*[42]*/
#pragma omp critical
{
/*[42]*/
/*[42]*/
sums[i_imopVarPre117].real += chk.real;
/*[42]*/
sums[i_imopVarPre117].imag += chk.imag;
}
/*[42]*/
// #pragma omp dummyFlush CRITICAL_END
/*[42]*/
// #pragma omp dummyFlush BARRIER_START
/*[42]*/
#pragma omp barrier
/*[43]*/
#pragma omp single nowait
{
/*[43]*/
/*[43]*/
sums[i_imopVarPre117].real = sums[i_imopVarPre117].real / (double) 262144;
/*[43]*/
sums[i_imopVarPre117].imag = sums[i_imopVarPre117].imag / (double) 262144;
/*[43]*/
double _imopVarPre279;
/*[43]*/
double _imopVarPre280;
/*[43]*/
_imopVarPre279 = sums[i_imopVarPre117].imag;
/*[43]*/
_imopVarPre280 = sums[i_imopVarPre117].real;
/*[43]*/
printf("T = %5d Checksum = %22.12e %22.12e\n", i_imopVarPre117, _imopVarPre280, _imopVarPre279);
/*[43]*/
}
/*[43]*/
#pragma omp master
{
/*[43]*/
/*[43]*/
/*[43]*/
if (0 == 1) {
/*[43]*/
/*[43]*/
timer_stop(4);
/*[43]*/
}
}
}
}
/*[]*/
char *_imopVarPre164;
/*[]*/
int *_imopVarPre165;
/*[]*/
_imopVarPre164 = &class;
/*[]*/
_imopVarPre165 = &verified;
/*[]*/
verify(64, 64, 64, niter, _imopVarPre165, _imopVarPre164);
/*[]*/
/*[44]*/
#pragma omp parallel
{
/*[44]*/
}
/*[]*/
timer_stop(0);
/*[]*/
/*[]*/
total_time = timer_read(0);
/*[]*/
/*[]*/
/*[]*/
if (total_time != 0.0) {
/*[]*/
/*[]*/
double _imopVarPre184;
/*[]*/
double _imopVarPre185;
/*[]*/
double _imopVarPre188;
/*[]*/
double _imopVarPre189;
/*[]*/
_imopVarPre184 = (double) 262144;
/*[]*/
_imopVarPre185 = log(_imopVarPre184);
/*[]*/
/*[]*/
_imopVarPre188 = (double) 262144;
/*[]*/
_imopVarPre189 = log(_imopVarPre188);
/*[]*/
/*[]*/
mflops = 1.0e-6 * (double) 262144 * (14.8157 + 7.19641 * _imopVarPre185 + (5.23518 + 7.21113 * _imopVarPre189) * niter) / total_time;
} else {
/*[]*/
/*[]*/
mflops = 0.0;
}
/*[]*/
c_print_results("FT", class, 64, 64, 64, niter, nthreads, total_time, mflops, " floating point", verified, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "randdp");
/*[]*/
/*[]*/
/*[]*/
if (0 == 1) {
/*[]*/
/*[]*/
print_timers();
/*[]*/
}
}
/*[28]*/
/*[28]*/
/*[28]*/
/*[28]*/
/*[28]*/
/*[28]*/
/*
 * evolve: advance the frequency-domain field one time step.
 *
 * For every grid point it sets
 *     u1[k][j][i] = u0[k][j][i] * ex[t * indexmap[k][j][i]]
 * i.e. each mode of u0 is scaled by a precomputed exponential factor,
 * selected by the time index t and the mode's squared-frequency table
 * indexmap (both ex[] and indexmap[] are filled by compute_indexmap).
 * d[] holds the three grid extents.
 *
 * NOTE: the "omp for" below is an ORPHANED worksharing construct --
 * this function must be called from inside an enclosing OpenMP
 * parallel region, which then distributes the outer k loop over the
 * team ("nowait" skips the barrier at the end of the loop).
 * The bracketed numeric comments are tool-generated phase markers;
 * leave them in place.
 */
static void evolve(dcomplex u0[64][64][64], dcomplex u1[64][64][64] , int t , int indexmap[64][64][64] , int d[3]) {
/*[28]*/
/*[28]*/
int i;
/*[28]*/
int j;
/*[28]*/
int k;
/*[28]*/
#pragma omp for nowait
/*[28]*/
/*[28]*/
/*[28]*/
for (k = 0; k < d[2]; k++) {
/*[28]*/
/*[28]*/
/*[28]*/
/*[28]*/
/*[28]*/
for (j = 0; j < d[1]; j++) {
/*[28]*/
/*[28]*/
/*[28]*/
/*[28]*/
/*[28]*/
for (i = 0; i < d[0]; i++) {
/*[28]*/
/*[28]*/
/* real and imaginary parts are scaled by the same factor */
u1[k][j][i].real = u0[k][j][i].real * ex[t * indexmap[k][j][i]];
/*[28]*/
(u1[k][j][i].imag = u0[k][j][i].imag * ex[t * indexmap[k][j][i]]);
}
}
}
}
/*[]*/
/*[]*/
/*[]*/
/*
 * compute_initial_conditions: fill u0 with the benchmark's
 * pseudo-random initial data, one k-plane at a time.
 *
 * Uses the NPB linear congruential generator (seed 314159265.0,
 * multiplier 1220703125.0): ipow46 advances the generator past the
 * offset implied by this rank's zstart/ystart, then for each k-plane
 * vranlc produces 2 * 64 * dims[0][1] random doubles into tmp[],
 * consumed in (real, imag) pairs.  randlc steps the plane seed
 * forward between planes.  d[] is accepted for interface symmetry;
 * the loop bounds come from the global dims[0][] table.
 */
static void compute_initial_conditions(dcomplex u0[64][64][64], int d[3]) {
/*[]*/
/*[]*/
int k;
/*[]*/
double x0;
/*[]*/
double start;
/*[]*/
double an;
/*[]*/
double dummy;
/*[]*/
/* one plane's worth of randoms; static so it is not on the stack */
static double tmp[64 * 2 * 64 + 1];
/*[]*/
int i;
/*[]*/
int j;
/*[]*/
int t;
/*[]*/
start = 314159265.0;
/*[]*/
double *_imopVarPre192;
/*[]*/
int _imopVarPre193;
/*[]*/
_imopVarPre192 = &an;
/*[]*/
/* jump the generator to this rank's first (z, y) position */
_imopVarPre193 = (zstart[0] - 1) * 2 * 64 * 64 + (ystart[0] - 1) * 2 * 64;
/*[]*/
ipow46(1220703125.0, _imopVarPre193, _imopVarPre192);
/*[]*/
/*[]*/
double *_imopVarPre195;
/*[]*/
double _imopVarPre196;
/*[]*/
_imopVarPre195 = &start;
/*[]*/
_imopVarPre196 = randlc(_imopVarPre195, an);
/*[]*/
/*[]*/
dummy = _imopVarPre196;
/*[]*/
double *_imopVarPre199;
/*[]*/
int _imopVarPre200;
/*[]*/
_imopVarPre199 = &an;
/*[]*/
/* an = multiplier^(2*64*64): the per-plane seed advance */
_imopVarPre200 = 2 * 64 * 64;
/*[]*/
ipow46(1220703125.0, _imopVarPre200, _imopVarPre199);
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (k = 0; k < dims[0][2]; k++) {
/*[]*/
/*[]*/
x0 = start;
/*[]*/
double *_imopVarPre203;
/*[]*/
int _imopVarPre204;
/*[]*/
_imopVarPre203 = &x0;
/*[]*/
_imopVarPre204 = 2 * 64 * dims[0][1];
/*[]*/
vranlc(_imopVarPre204, _imopVarPre203, 1220703125.0, tmp);
/*[]*/
/*[]*/
/* tmp[0] is unused; values are consumed starting at index 1 */
t = 1;
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (j = 0; j < dims[0][1]; j++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 0; i < 64; i++) {
/*[]*/
/*[]*/
u0[k][j][i].real = tmp[t++];
/*[]*/
u0[k][j][i].imag = tmp[t++];
}
}
/*[]*/
/*[]*/
/* NOTE(review): k != dims[0][2] always holds inside this loop (the
   loop condition keeps k below that bound), so the seed is advanced
   after every plane; this matches the reference NPB C code, but the
   guard is effectively dead. */
if (k != dims[0][2]) {
/*[]*/
/*[]*/
double *_imopVarPre206;
/*[]*/
double _imopVarPre207;
/*[]*/
_imopVarPre206 = &start;
/*[]*/
_imopVarPre207 = randlc(_imopVarPre206, an);
/*[]*/
/*[]*/
dummy = _imopVarPre207;
}
}
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*
 * ipow46: raise a to the integer power `exponent` under the NPB
 * 46-bit pseudo-random generator arithmetic, storing the result in
 * *result.  Implemented as binary exponentiation driven by randlc():
 * randlc(&base, base) squares the running base, randlc(&acc, base)
 * folds one factor of the base into the accumulated product.
 * An exponent of 0 yields 1.
 */
static void ipow46(double a, int exponent , double *result) {
    double discard;   /* randlc's return value is not needed here */
    double base;      /* running power of a */
    double acc;       /* accumulated product */
    int remaining;    /* exponent still to be applied */
    int half;
    *result = 1;
    if (exponent == 0) {
        return;
    }
    base = a;
    acc = 1;
    remaining = exponent;
    while (remaining > 1) {
        half = remaining / 2;
        if (half * 2 == remaining) {
            /* even exponent: square the base, halve the count */
            discard = randlc(&base, base);
            remaining = half;
        } else {
            /* odd exponent: multiply one base factor into acc */
            discard = randlc(&acc, base);
            remaining = remaining - 1;
        }
    }
    /* fold in the final remaining factor */
    discard = randlc(&acc, base);
    *result = acc;
}
/*[]*/
/*
 * setup: initialize benchmark parameters for the fixed 64^3 problem.
 *
 * Prints the banner and problem size, fixes the iteration count at 6,
 * sets every per-axis dimension table to 64, marks this (single)
 * process as owning the full [1, 64] range in x, y and z, and
 * configures the FFT cache-blocking factors (fftblock/fftblockpad).
 */
static void setup() {
    int axis;
    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - FT Benchmark\n\n");
    niter = 6;
    printf(" Size : %3dx%3dx%3d\n", 64, 64, 64);
    printf(" Iterations : %7d\n", niter);
    /* every transform axis sees the full 64x64x64 grid, and this
       process owns the entire index range along each coordinate */
    for (axis = 0; axis < 3; axis++) {
        dims[axis][0] = 64;
        dims[axis][1] = 64;
        dims[axis][2] = 64;
        xstart[axis] = 1;
        xend[axis] = 64;
        ystart[axis] = 1;
        yend[axis] = 64;
        zstart[axis] = 1;
        zend[axis] = 64;
    }
    /* blocking factors for the FFT sweeps; the padded leading
       dimension avoids cache-line conflicts in the scratch arrays */
    fftblock = 16;
    fftblockpad = 18;
    if (fftblock != 16) {
        fftblockpad = fftblock + 3;
    }
}
/*[]*/
/*[]*/
/*[]*/
/*
 * compute_indexmap: build the squared-frequency table and the
 * exponential-factor table used by evolve().
 *
 * Parallel part: for each grid point, map each coordinate to its
 * signed frequency offset in [-32, 31] (the "+64/2 ... %64 - 64/2"
 * wrap) and store the sum of the three squared offsets in
 * indexmap[k][j][i].  Indices are shifted by xstart/ystart/zstart[2].
 * Serial part: fill ex[m] = exp(ap)^m for m up to 6*(3*64*64/4),
 * where ap = -4e-6 * pi^2, by repeated multiplication.
 * d[] is accepted for interface symmetry; bounds come from dims[2][].
 */
static void compute_indexmap(int indexmap[64][64][64], int d[3]) {
/*[]*/
/*[]*/
int i;
/*[]*/
int j;
/*[]*/
int k;
/*[]*/
int ii;
/*[]*/
int ii2;
/*[]*/
int jj;
/*[]*/
int ij2;
/*[]*/
int kk;
/*[]*/
double ap;
/*[45]*/
#pragma omp parallel default(shared) private(i, j, k, ii, ii2, jj, ij2, kk)
{
/*[45]*/
/*[45]*/
#pragma omp for nowait
/*[45]*/
/*[45]*/
/*[45]*/
for (i = 0; i < dims[2][0]; i++) {
/*[45]*/
/*[45]*/
/* signed frequency offset of column i, wrapped into [-32, 31] */
ii = (i + 1 + xstart[2] - 2 + 64 / 2) % 64 - 64 / 2;
/*[45]*/
ii2 = ii * ii;
/*[45]*/
/*[45]*/
/*[45]*/
/*[45]*/
for (j = 0; j < dims[2][1]; j++) {
/*[45]*/
/*[45]*/
jj = (j + 1 + ystart[2] - 2 + 64 / 2) % 64 - 64 / 2;
/*[45]*/
ij2 = jj * jj + ii2;
/*[45]*/
/*[45]*/
/*[45]*/
/*[45]*/
for (k = 0; k < dims[2][2]; k++) {
/*[45]*/
/*[45]*/
kk = (k + 1 + zstart[2] - 2 + 64 / 2) % 64 - 64 / 2;
/*[45]*/
indexmap[k][j][i] = kk * kk + ij2;
}
}
}
}
/*[]*/
/* serial tail: precompute the geometric table of exponentials */
ap = -4.0 * 1.0e-6 * 3.141592653589793238 * 3.141592653589793238;
/*[]*/
ex[0] = 1.0;
/*[]*/
double _imopVarPre217;
/*[]*/
_imopVarPre217 = exp(ap);
/*[]*/
/*[]*/
ex[1] = _imopVarPre217;
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 2; i <= (6 * (64 * 64 / 4 + 64 * 64 / 4 + 64 * 64 / 4)); i++) {
/*[]*/
/*[]*/
ex[i] = ex[i - 1] * ex[1];
}
}
/*[]*/
/*
 * print_timers: report per-section timing.
 *
 * For each of the 7 named timers, prints the accumulated time from
 * timer_read(); timers that never ran (read back as exactly 0.0) are
 * skipped.
 *
 * Fixes vs. the previous version:
 *  - the printf format had an unbalanced '(' after the timer name
 *    ("(%16s(") -- corrected to a closing ')';
 *  - timer_read() is now called once per timer and the value reused,
 *    instead of being read twice per iteration.
 */
static void print_timers() {
    int i;
    char *tstrings[] = {" total ", " setup " , " fft " , " evolve " , " checksum " , " fftlow " , " fftcopy "};
    for (i = 0; i < 7; i++) {
        double t = timer_read(i);
        if (t != 0.0) {
            printf("timer %2d(%16s) :%10.6f\n", i, tstrings[i], t);
        }
    }
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*
 * fft: 3-D complex FFT over the 64^3 grid -- the inlined bodies of the
 * three 1-D sweep routines (cffts1/cffts2/cffts3).
 *
 * dir == 1 : forward transform, sweep order dimension 0, 1, 2; the
 *            first two passes run in place on x1, the last writes x2.
 * dir != 1 : inverse transform (sign argument -1 to cfftz), sweep
 *            order dimension 2, 1, 0; again only the final pass
 *            writes x2.
 *
 * Every pass opens its own OpenMP parallel region: the master thread
 * fills the shared logd[] table (logd[i] = ilog2 of extent i of that
 * pass's dims[] row), the team meets at a barrier, then the outermost
 * grid loop is work-shared with "omp for nowait" -- the implicit
 * barrier at the end of each parallel region still orders the passes.
 * Data move through per-thread fftblock-wide scratch tiles (wy0/wy1)
 * so cfftz always operates on a contiguous working set.
 */
static void fft(int dir, dcomplex x1[64][64][64] , dcomplex x2[64][64][64]) {
    dcomplex y0[64][18];   /* unused at this level; each pass keeps private scratch */
    dcomplex y1[64][18];
    if (dir == 1) {
        /* forward pass 1: transform along dimension 0, x1 -> x1 */
        {
            int logd[3];
#pragma omp parallel default(shared)
            {
                int i;
                int j;
                int k;
                int jj;
#pragma omp master
                {
                    for (i = 0; i < 3; i++) {
                        logd[i] = ilog2(dims[0][i]);
                    }
                }
#pragma omp barrier
                dcomplex wy0[64][18];
                dcomplex wy1[64][18];
#pragma omp for nowait
                for (k = 0; k < dims[0][2]; k++) {
                    for (jj = 0; jj <= dims[0][1] - fftblock; jj += fftblock) {
                        for (j = 0; j < fftblock; j++) {
                            for (i = 0; i < dims[0][0]; i++) {
                                wy0[i][j].real = x1[k][j + jj][i].real;
                                wy0[i][j].imag = x1[k][j + jj][i].imag;
                            }
                        }
                        cfftz(1, logd[0], dims[0][0], wy0, wy1);
                        for (j = 0; j < fftblock; j++) {
                            for (i = 0; i < dims[0][0]; i++) {
                                x1[k][j + jj][i].real = wy0[i][j].real;
                                x1[k][j + jj][i].imag = wy0[i][j].imag;
                            }
                        }
                    }
                }
            }
        }
        /* forward pass 2: transform along dimension 1, x1 -> x1 */
        {
            int logd[3];
#pragma omp parallel default(shared)
            {
                int i;
                int j;
                int k;
                int ii;
#pragma omp master
                {
                    for (i = 0; i < 3; i++) {
                        logd[i] = ilog2(dims[1][i]);
                    }
                }
#pragma omp barrier
                dcomplex wy0[64][18];
                dcomplex wy1[64][18];
#pragma omp for nowait
                for (k = 0; k < dims[1][2]; k++) {
                    for (ii = 0; ii <= dims[1][0] - fftblock; ii += fftblock) {
                        for (j = 0; j < dims[1][1]; j++) {
                            for (i = 0; i < fftblock; i++) {
                                wy0[j][i].real = x1[k][j][i + ii].real;
                                wy0[j][i].imag = x1[k][j][i + ii].imag;
                            }
                        }
                        cfftz(1, logd[1], dims[1][1], wy0, wy1);
                        for (j = 0; j < dims[1][1]; j++) {
                            for (i = 0; i < fftblock; i++) {
                                x1[k][j][i + ii].real = wy0[j][i].real;
                                x1[k][j][i + ii].imag = wy0[j][i].imag;
                            }
                        }
                    }
                }
            }
        }
        /* forward pass 3: transform along dimension 2, x1 -> x2 */
        {
            int logd[3];
#pragma omp parallel default(shared)
            {
                int i;
                int j;
                int k;
                int ii;
#pragma omp master
                {
                    for (i = 0; i < 3; i++) {
                        logd[i] = ilog2(dims[2][i]);
                    }
                }
#pragma omp barrier
                dcomplex wy0[64][18];
                dcomplex wy1[64][18];
#pragma omp for nowait
                for (j = 0; j < dims[2][1]; j++) {
                    for (ii = 0; ii <= dims[2][0] - fftblock; ii += fftblock) {
                        for (k = 0; k < dims[2][2]; k++) {
                            for (i = 0; i < fftblock; i++) {
                                wy0[k][i].real = x1[k][j][i + ii].real;
                                wy0[k][i].imag = x1[k][j][i + ii].imag;
                            }
                        }
                        cfftz(1, logd[2], dims[2][2], wy0, wy1);
                        for (k = 0; k < dims[2][2]; k++) {
                            for (i = 0; i < fftblock; i++) {
                                x2[k][j][i + ii].real = wy0[k][i].real;
                                x2[k][j][i + ii].imag = wy0[k][i].imag;
                            }
                        }
                    }
                }
            }
        }
    } else {
        /* inverse pass 1: transform along dimension 2, x1 -> x1 */
        {
            int logd[3];
#pragma omp parallel default(shared)
            {
                int i;
                int j;
                int k;
                int ii;
#pragma omp master
                {
                    for (i = 0; i < 3; i++) {
                        logd[i] = ilog2(dims[2][i]);
                    }
                }
#pragma omp barrier
                dcomplex wy0[64][18];
                dcomplex wy1[64][18];
#pragma omp for nowait
                for (j = 0; j < dims[2][1]; j++) {
                    for (ii = 0; ii <= dims[2][0] - fftblock; ii += fftblock) {
                        for (k = 0; k < dims[2][2]; k++) {
                            for (i = 0; i < fftblock; i++) {
                                wy0[k][i].real = x1[k][j][i + ii].real;
                                wy0[k][i].imag = x1[k][j][i + ii].imag;
                            }
                        }
                        cfftz(-1, logd[2], dims[2][2], wy0, wy1);
                        for (k = 0; k < dims[2][2]; k++) {
                            for (i = 0; i < fftblock; i++) {
                                x1[k][j][i + ii].real = wy0[k][i].real;
                                x1[k][j][i + ii].imag = wy0[k][i].imag;
                            }
                        }
                    }
                }
            }
        }
        /* inverse pass 2: transform along dimension 1, x1 -> x1 */
        {
            int logd[3];
#pragma omp parallel default(shared)
            {
                int i;
                int j;
                int k;
                int ii;
#pragma omp master
                {
                    for (i = 0; i < 3; i++) {
                        logd[i] = ilog2(dims[1][i]);
                    }
                }
#pragma omp barrier
                dcomplex wy0[64][18];
                dcomplex wy1[64][18];
#pragma omp for nowait
                for (k = 0; k < dims[1][2]; k++) {
                    for (ii = 0; ii <= dims[1][0] - fftblock; ii += fftblock) {
                        for (j = 0; j < dims[1][1]; j++) {
                            for (i = 0; i < fftblock; i++) {
                                wy0[j][i].real = x1[k][j][i + ii].real;
                                wy0[j][i].imag = x1[k][j][i + ii].imag;
                            }
                        }
                        cfftz(-1, logd[1], dims[1][1], wy0, wy1);
                        for (j = 0; j < dims[1][1]; j++) {
                            for (i = 0; i < fftblock; i++) {
                                x1[k][j][i + ii].real = wy0[j][i].real;
                                x1[k][j][i + ii].imag = wy0[j][i].imag;
                            }
                        }
                    }
                }
            }
        }
        /* inverse pass 3: transform along dimension 0, x1 -> x2 */
        {
            int logd[3];
#pragma omp parallel default(shared)
            {
                int i;
                int j;
                int k;
                int jj;
#pragma omp master
                {
                    for (i = 0; i < 3; i++) {
                        logd[i] = ilog2(dims[0][i]);
                    }
                }
#pragma omp barrier
                dcomplex wy0[64][18];
                dcomplex wy1[64][18];
#pragma omp for nowait
                for (k = 0; k < dims[0][2]; k++) {
                    for (jj = 0; jj <= dims[0][1] - fftblock; jj += fftblock) {
                        for (j = 0; j < fftblock; j++) {
                            for (i = 0; i < dims[0][0]; i++) {
                                wy0[i][j].real = x1[k][j + jj][i].real;
                                wy0[i][j].imag = x1[k][j + jj][i].imag;
                            }
                        }
                        cfftz(-1, logd[0], dims[0][0], wy0, wy1);
                        for (j = 0; j < fftblock; j++) {
                            for (i = 0; i < dims[0][0]; i++) {
                                x2[k][j + jj][i].real = wy0[i][j].real;
                                x2[k][j + jj][i].imag = wy0[i][j].imag;
                            }
                        }
                    }
                }
            }
        }
    }
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
static void cffts1(int is, int d[3] , dcomplex x[64][64][64] , dcomplex xout[64][64][64] , dcomplex y0[64][18] , dcomplex y1[64][18]) {
    /* 1-D FFTs of length d[0] along the first (unit-stride) dimension.
     * logd[] is shared by the team: the master thread fills it, the barrier
     * below publishes it before any thread reads it. */
    int logd[3];
    #pragma omp parallel default(shared) shared(is)
    {
        int i;
        int j;
        int k;
        int jj;
        #pragma omp master
        {
            for (i = 0; i < 3; i++) {
                logd[i] = ilog2(d[i]);
            }
        }
        #pragma omp barrier
        /* Thread-private scratch planes (these shadow the unused y0/y1 params). */
        dcomplex scratch0[64][18];
        dcomplex scratch1[64][18];
        #pragma omp for nowait
        for (k = 0; k < d[2]; k++) {
            for (jj = 0; jj <= d[1] - fftblock; jj += fftblock) {
                /* Gather an fftblock-wide slab, transposed so the transform
                 * dimension is the leading index of the scratch plane. */
                for (j = 0; j < fftblock; j++) {
                    for (i = 0; i < d[0]; i++) {
                        scratch0[i][j].real = x[k][j + jj][i].real;
                        scratch0[i][j].imag = x[k][j + jj][i].imag;
                    }
                }
                cfftz(is, logd[0], d[0], scratch0, scratch1);
                /* Scatter the transformed slab back into the output array. */
                for (j = 0; j < fftblock; j++) {
                    for (i = 0; i < d[0]; i++) {
                        xout[k][j + jj][i].real = scratch0[i][j].real;
                        xout[k][j + jj][i].imag = scratch0[i][j].imag;
                    }
                }
            }
        }
    }
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
static void cffts2(int is, int d[3] , dcomplex x[64][64][64] , dcomplex xout[64][64][64] , dcomplex y0[64][18] , dcomplex y1[64][18]) {
    /* 1-D FFTs of length d[1] along the second dimension.
     * logd[] is shared; master fills it and the barrier publishes it. */
    int logd[3];
    #pragma omp parallel default(shared) shared(is)
    {
        int i;
        int j;
        int k;
        int ii;
        #pragma omp master
        {
            for (i = 0; i < 3; i++) {
                logd[i] = ilog2(d[i]);
            }
        }
        #pragma omp barrier
        /* Thread-private scratch planes (these shadow the unused y0/y1 params). */
        dcomplex scratch0[64][18];
        dcomplex scratch1[64][18];
        #pragma omp for nowait
        for (k = 0; k < d[2]; k++) {
            for (ii = 0; ii <= d[0] - fftblock; ii += fftblock) {
                /* Copy an fftblock-wide pencil bundle into the scratch plane;
                 * here no transpose is needed (j is already the leading index). */
                for (j = 0; j < d[1]; j++) {
                    for (i = 0; i < fftblock; i++) {
                        scratch0[j][i].real = x[k][j][i + ii].real;
                        scratch0[j][i].imag = x[k][j][i + ii].imag;
                    }
                }
                cfftz(is, logd[1], d[1], scratch0, scratch1);
                /* Write the transformed bundle back out. */
                for (j = 0; j < d[1]; j++) {
                    for (i = 0; i < fftblock; i++) {
                        xout[k][j][i + ii].real = scratch0[j][i].real;
                        xout[k][j][i + ii].imag = scratch0[j][i].imag;
                    }
                }
            }
        }
    }
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
static void cffts3(int is, int d[3] , dcomplex x[64][64][64] , dcomplex xout[64][64][64] , dcomplex y0[64][18] , dcomplex y1[64][18]) {
    /* 1-D FFTs of length d[2] along the third (outermost) dimension.
     * logd[] is shared; master fills it and the barrier publishes it. */
    int logd[3];
    #pragma omp parallel default(shared) shared(is)
    {
        int i;
        int j;
        int k;
        int ii;
        #pragma omp master
        {
            for (i = 0; i < 3; i++) {
                logd[i] = ilog2(d[i]);
            }
        }
        #pragma omp barrier
        /* Thread-private scratch planes (these shadow the unused y0/y1 params). */
        dcomplex scratch0[64][18];
        dcomplex scratch1[64][18];
        #pragma omp for nowait
        for (j = 0; j < d[1]; j++) {
            for (ii = 0; ii <= d[0] - fftblock; ii += fftblock) {
                /* Gather an fftblock-wide pencil bundle with k as the leading
                 * scratch index (the dimension being transformed). */
                for (k = 0; k < d[2]; k++) {
                    for (i = 0; i < fftblock; i++) {
                        scratch0[k][i].real = x[k][j][i + ii].real;
                        scratch0[k][i].imag = x[k][j][i + ii].imag;
                    }
                }
                cfftz(is, logd[2], d[2], scratch0, scratch1);
                /* Scatter the transformed bundle back. */
                for (k = 0; k < d[2]; k++) {
                    for (i = 0; i < fftblock; i++) {
                        xout[k][j][i + ii].real = scratch0[k][i].real;
                        xout[k][j][i + ii].imag = scratch0[k][i].imag;
                    }
                }
            }
        }
    }
}
/*[]*/
/*[]*/
static void fft_init(int n) {
    /* Precompute the twiddle-factor table u[] for transforms of length n.
     * u[0].real records log2(n) so cfftz can validate its arguments later.
     * Level j (1..m) stores ln = 2^(j-1) factors exp(i*pi*k/ln), k = 0..ln-1,
     * starting at index ku; ku advances by ln per level.
     *
     * Fix: removed the local `nu` (assigned n, never read) and the pointless
     * single-use temporaries around cos()/sin(). */
    int m;
    int ku;
    int i;
    int j;
    int ln;
    double t;
    double ti;
    m = ilog2(n);
    u[0].real = (double) m;
    u[0].imag = 0.0;
    ku = 1;
    ln = 1;
    for (j = 1; j <= m; j++) {
        /* Angular step for this level: pi / ln. */
        t = 3.141592653589793238 / ln;
        for (i = 0; i <= ln - 1; i++) {
            ti = i * t;
            u[i + ku].real = cos(ti);
            u[i + ku].imag = sin(ti);
        }
        ku = ku + ln;
        ln = 2 * ln;
    }
}
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
static void cfftz(int is, int m , int n , dcomplex x[64][18] , dcomplex y[64][18]) {
    /* Drive an m-stage complex FFT of length n = 2^m on fftblock pencils,
     * ping-ponging between x and y; the final result always ends up in x.
     * is = +1 selects the forward transform, is = -1 the inverse. */
    int i;
    int j;
    int l;
    int mx;
    /* u[0].real holds log2 of the maximum supported size (set by fft_init). */
    mx = (int) (u[0].real);
    /* Argument check: direction must be +/-1 and 1 <= m <= mx. */
    if ((is != 1 && is != -1) || m < 1 || m > mx) {
        printf("CFFTZ: Either U has not been initialized, or else\n" "one of the input parameters is invalid%5d%5d%5d\n", is, m, mx);
        exit(1);
    }
    /* Two butterfly stages per iteration: x -> y, then y -> x. */
    for (l = 1; l <= m; l += 2) {
        fftz2(is, l, m, n, fftblock, fftblockpad, u, x, y);
        if (l == m) {
            break;
        }
        fftz2(is, l + 1, m, n, fftblock, fftblockpad, u, y, x);
    }
    /* An odd stage count leaves the result in y; copy it back into x. */
    if (m % 2 == 1) {
        for (j = 0; j < n; j++) {
            for (i = 0; i < fftblock; i++) {
                x[j][i] = y[j][i];
            }
        }
    }
}
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
/*[3, 5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 47, 49, 51, 53, 55, 57, 59, 61, 63]*/
static void fftz2(int is, int l , int m , int n , int ny , int ny1 , dcomplex u[64] , dcomplex x[64][18] , dcomplex y[64][18]) {
    /* One butterfly stage (stage l of m) of a length-n Stockham FFT applied
     * to ny side-by-side pencils: x is the input plane, y the output plane.
     * ny1 (the padded block size) is accepted for interface compatibility
     * but not used here. */
    int k;
    int j;
    int i;
    int half;   /* n/2: offset between the two butterfly inputs */
    int lk;     /* input block length at this stage: 2^(l-1) */
    int li;     /* number of blocks: 2^(m-l) */
    int lj;     /* output block stride: 2*lk */
    int ku;     /* base index of this stage's twiddle factors in u[] */
    dcomplex u1;
    half = n / 2;
    lk = (l == 1) ? 1 : (2 << (l - 2));
    li = (l == m) ? 1 : (2 << (m - l - 1));
    lj = 2 * lk;
    ku = li;
    for (i = 0; i < li; i++) {
        int i11 = i * lk;       /* first input block  */
        int i12 = i11 + half;   /* second input block */
        int i21 = i * lj;       /* sum output block   */
        int i22 = i21 + lk;     /* diff output block  */
        /* Inverse transform (is < 1) conjugates the twiddle factor. */
        u1.real = u[ku + i].real;
        u1.imag = (is >= 1) ? u[ku + i].imag : -u[ku + i].imag;
        for (k = 0; k < lk; k++) {
            for (j = 0; j < ny; j++) {
                double ar = x[i11 + k][j].real;
                double ai = x[i11 + k][j].imag;
                double br = x[i12 + k][j].real;
                double bi = x[i12 + k][j].imag;
                /* Butterfly: sum goes to i21, twiddled difference to i22. */
                y[i21 + k][j].real = ar + br;
                y[i21 + k][j].imag = ai + bi;
                y[i22 + k][j].real = u1.real * (ar - br) - u1.imag * (ai - bi);
                y[i22 + k][j].imag = u1.real * (ai - bi) + u1.imag * (ar - br);
            }
        }
    }
}
/*[2, 4, 6, 8, 10, 12, 15, 17, 19, 21, 23, 25, 28, 29, 31, 33, 35, 37, 39, 46, 48, 50, 52, 54, 56, 58, 60, 62]*/
/*[2, 4, 6, 8, 10, 12, 15, 17, 19, 21, 23, 25, 28, 29, 31, 33, 35, 37, 39, 46, 48, 50, 52, 54, 56, 58, 60, 62]*/
static int ilog2(int n) {
    /* Ceiling of log2(n) for n >= 1 (smallest lg with 2^lg >= n),
     * with the special cases ilog2(1) == 0 and ilog2(2) == 1. */
    int lg;
    int pw;
    if (n == 1) {
        return 0;
    }
    lg = 1;
    for (pw = 2; pw < n; pw <<= 1) {
        lg++;
    }
    return lg;
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
static void checksum(int i, dcomplex u1[64][64][64] , int d[3]) {
    /* Accumulate the iteration-i spectral checksum into sums[i] and print it.
     * The worksharing pragmas below are orphaned: this function is intended
     * to be called from inside an enclosing parallel region, so each thread
     * sums its share of the 1024 probe points into a private chk first. */
    int j;
    int q;
    int r;
    int s;
    dcomplex chk;
    chk.real = 0.0;
    chk.imag = 0.0;
    #pragma omp for nowait
    for (j = 1; j <= 1024; j++) {
        /* Probe coordinates (q, r, s); only sample points that fall inside
         * this rank's local slab [xstart,xend] x [ystart,yend] x [zstart,zend]. */
        q = j % 64 + 1;
        if (q >= xstart[0] && q <= xend[0]) {
            r = (3 * j) % 64 + 1;
            if (r >= ystart[0] && r <= yend[0]) {
                s = (5 * j) % 64 + 1;
                if (s >= zstart[0] && s <= zend[0]) {
                    chk.real = chk.real + u1[s - zstart[0]][r - ystart[0]][q - xstart[0]].real;
                    chk.imag = chk.imag + u1[s - zstart[0]][r - ystart[0]][q - xstart[0]].imag;
                }
            }
        }
    }
    /* Merge the per-thread partial sums into the shared total. */
    #pragma omp critical
    {
        sums[i].real += chk.real;
        sums[i].imag += chk.imag;
    }
    /* All partials must be merged before any thread normalizes/prints. */
    #pragma omp barrier
    #pragma omp single nowait
    {
        /* Normalize by the total grid size (64^3 = 262144) and report. */
        sums[i].real = sums[i].real / (double) 262144;
        sums[i].imag = sums[i].imag / (double) 262144;
        printf("T = %5d Checksum = %22.12e %22.12e\n", i, sums[i].real, sums[i].imag);
    }
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
static void verify(int d1, int d2 , int d3 , int nt , boolean *verified , char *class) {
/*[]*/
/*[]*/
int i;
/*[]*/
double err;
/*[]*/
double epsilon;
/*[]*/
double vdata_real_s[6 + 1] = {0.0, 5.546087004964e+02 , 5.546385409189e+02 , 5.546148406171e+02 , 5.545423607415e+02 , 5.544255039624e+02 , 5.542683411902e+02};
/*[]*/
double vdata_imag_s[6 + 1] = {0.0, 4.845363331978e+02 , 4.865304269511e+02 , 4.883910722336e+02 , 4.901273169046e+02 , 4.917475857993e+02 , 4.932597244941e+02};
/*[]*/
double vdata_real_w[6 + 1] = {0.0, 5.673612178944e+02 , 5.631436885271e+02 , 5.594024089970e+02 , 5.560698047020e+02 , 5.530898991250e+02 , 5.504159734538e+02};
/*[]*/
double vdata_imag_w[6 + 1] = {0.0, 5.293246849175e+02 , 5.282149986629e+02 , 5.270996558037e+02 , 5.260027904925e+02 , 5.249400845633e+02 , 5.239212247086e+02};
/*[]*/
double vdata_real_a[6 + 1] = {0.0, 5.046735008193e+02 , 5.059412319734e+02 , 5.069376896287e+02 , 5.077892868474e+02 , 5.085233095391e+02 , 5.091487099959e+02};
/*[]*/
double vdata_imag_a[6 + 1] = {0.0, 5.114047905510e+02 , 5.098809666433e+02 , 5.098144042213e+02 , 5.101336130759e+02 , 5.104914655194e+02 , 5.107917842803e+02};
/*[]*/
double vdata_real_b[20 + 1] = {0.0, 5.177643571579e+02 , 5.154521291263e+02 , 5.146409228649e+02 , 5.142378756213e+02 , 5.139626667737e+02 , 5.137423460082e+02 , 5.135547056878e+02 , 5.133910925466e+02 , 5.132470705390e+02 , 5.131197729984e+02 , 5.130070319283e+02 , 5.129070537032e+02 , 5.128182883502e+02 , 5.127393733383e+02 , 5.126691062020e+02 , 5.126064276004e+02 , 5.125504076570e+02 , 5.125002331720e+02 , 5.124551951846e+02 , 5.124146770029e+02};
/*[]*/
double vdata_imag_b[20 + 1] = {0.0, 5.077803458597e+02 , 5.088249431599e+02 , 5.096208912659e+02 , 5.101023387619e+02 , 5.103976610617e+02 , 5.105948019802e+02 , 5.107404165783e+02 , 5.108576573661e+02 , 5.109577278523e+02 , 5.110460304483e+02 , 5.111252433800e+02 , 5.111968077718e+02 , 5.112616233064e+02 , 5.113203605551e+02 , 5.113735928093e+02 , 5.114218460548e+02 , 5.114656139760e+02 , 5.115053595966e+02 , 5.115415130407e+02 , 5.115744692211e+02};
/*[]*/
double vdata_real_c[20 + 1] = {0.0, 5.195078707457e+02 , 5.155422171134e+02 , 5.144678022222e+02 , 5.140150594328e+02 , 5.137550426810e+02 , 5.135811056728e+02 , 5.134569343165e+02 , 5.133651975661e+02 , 5.132955192805e+02 , 5.132410471738e+02 , 5.131971141679e+02 , 5.131605205716e+02 , 5.131290734194e+02 , 5.131012720314e+02 , 5.130760908195e+02 , 5.130528295923e+02 , 5.130310107773e+02 , 5.130103090133e+02 , 5.129905029333e+02 , 5.129714421109e+02};
/*[]*/
double vdata_imag_c[20 + 1] = {0.0, 5.149019699238e+02 , 5.127578201997e+02 , 5.122251847514e+02 , 5.121090289018e+02 , 5.121143685824e+02 , 5.121496764568e+02 , 5.121870921893e+02 , 5.122193250322e+02 , 5.122454735794e+02 , 5.122663649603e+02 , 5.122830879827e+02 , 5.122965869718e+02 , 5.123075927445e+02 , 5.123166486553e+02 , 5.123241541685e+02 , 5.123304037599e+02 , 5.123356167976e+02 , 5.123399592211e+02 , 5.123435588985e+02 , 5.123465164008e+02};
/*[]*/
epsilon = 1.0e-12;
/*[]*/
*verified = 1;
/*[]*/
*class = 'U';
/*[]*/
int _imopVarPre284;
/*[]*/
int _imopVarPre285;
/*[]*/
int _imopVarPre286;
/*[]*/
_imopVarPre284 = d1 == 64;
/*[]*/
/*[]*/
if (_imopVarPre284) {
/*[]*/
/*[]*/
_imopVarPre285 = d2 == 64;
/*[]*/
/*[]*/
if (_imopVarPre285) {
/*[]*/
/*[]*/
_imopVarPre286 = d3 == 64;
/*[]*/
/*[]*/
if (_imopVarPre286) {
/*[]*/
/*[]*/
_imopVarPre286 = nt == 6;
}
/*[]*/
_imopVarPre285 = _imopVarPre286;
}
/*[]*/
_imopVarPre284 = _imopVarPre285;
}
/*[]*/
/*[]*/
if (_imopVarPre284) {
/*[]*/
/*[]*/
*class = 'S';
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 1; i <= nt; i++) {
/*[]*/
/*[]*/
err = (sums[i].real - vdata_real_s[i]) / vdata_real_s[i];
/*[]*/
double _imopVarPre288;
/*[]*/
_imopVarPre288 = fabs(err);
/*[]*/
/*[]*/
/*[]*/
if (_imopVarPre288 > epsilon) {
/*[]*/
/*[]*/
*verified = 0;
/*[]*/
break;
}
/*[]*/
err = (sums[i].imag - vdata_imag_s[i]) / vdata_imag_s[i];
/*[]*/
double _imopVarPre290;
/*[]*/
_imopVarPre290 = fabs(err);
/*[]*/
/*[]*/
/*[]*/
if (_imopVarPre290 > epsilon) {
/*[]*/
/*[]*/
*verified = 0;
/*[]*/
break;
}
}
} else {
/*[]*/
/*[]*/
int _imopVarPre294;
/*[]*/
int _imopVarPre295;
/*[]*/
int _imopVarPre296;
/*[]*/
_imopVarPre294 = d1 == 128;
/*[]*/
/*[]*/
if (_imopVarPre294) {
/*[]*/
/*[]*/
_imopVarPre295 = d2 == 128;
/*[]*/
/*[]*/
if (_imopVarPre295) {
/*[]*/
/*[]*/
_imopVarPre296 = d3 == 32;
/*[]*/
/*[]*/
if (_imopVarPre296) {
/*[]*/
/*[]*/
_imopVarPre296 = nt == 6;
}
/*[]*/
_imopVarPre295 = _imopVarPre296;
}
/*[]*/
_imopVarPre294 = _imopVarPre295;
}
/*[]*/
/*[]*/
if (_imopVarPre294) {
/*[]*/
/*[]*/
*class = 'W';
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 1; i <= nt; i++) {
/*[]*/
/*[]*/
err = (sums[i].real - vdata_real_w[i]) / vdata_real_w[i];
/*[]*/
double _imopVarPre298;
/*[]*/
_imopVarPre298 = fabs(err);
/*[]*/
/*[]*/
/*[]*/
if (_imopVarPre298 > epsilon) {
/*[]*/
/*[]*/
*verified = 0;
/*[]*/
break;
}
/*[]*/
err = (sums[i].imag - vdata_imag_w[i]) / vdata_imag_w[i];
/*[]*/
double _imopVarPre300;
/*[]*/
_imopVarPre300 = fabs(err);
/*[]*/
/*[]*/
/*[]*/
if (_imopVarPre300 > epsilon) {
/*[]*/
/*[]*/
*verified = 0;
/*[]*/
break;
}
}
} else {
/*[]*/
/*[]*/
int _imopVarPre304;
/*[]*/
int _imopVarPre305;
/*[]*/
int _imopVarPre306;
/*[]*/
_imopVarPre304 = d1 == 256;
/*[]*/
/*[]*/
if (_imopVarPre304) {
/*[]*/
/*[]*/
_imopVarPre305 = d2 == 256;
/*[]*/
/*[]*/
if (_imopVarPre305) {
/*[]*/
/*[]*/
_imopVarPre306 = d3 == 128;
/*[]*/
/*[]*/
if (_imopVarPre306) {
/*[]*/
/*[]*/
_imopVarPre306 = nt == 6;
}
/*[]*/
_imopVarPre305 = _imopVarPre306;
}
/*[]*/
_imopVarPre304 = _imopVarPre305;
}
/*[]*/
/*[]*/
if (_imopVarPre304) {
/*[]*/
/*[]*/
*class = 'A';
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 1; i <= nt; i++) {
/*[]*/
/*[]*/
err = (sums[i].real - vdata_real_a[i]) / vdata_real_a[i];
/*[]*/
double _imopVarPre308;
/*[]*/
_imopVarPre308 = fabs(err);
/*[]*/
/*[]*/
/*[]*/
if (_imopVarPre308 > epsilon) {
/*[]*/
/*[]*/
*verified = 0;
/*[]*/
break;
}
/*[]*/
err = (sums[i].imag - vdata_imag_a[i]) / vdata_imag_a[i];
/*[]*/
double _imopVarPre310;
/*[]*/
_imopVarPre310 = fabs(err);
/*[]*/
/*[]*/
/*[]*/
if (_imopVarPre310 > epsilon) {
/*[]*/
/*[]*/
*verified = 0;
/*[]*/
break;
}
}
} else {
/*[]*/
/*[]*/
int _imopVarPre314;
/*[]*/
int _imopVarPre315;
/*[]*/
int _imopVarPre316;
/*[]*/
_imopVarPre314 = d1 == 512;
/*[]*/
/*[]*/
if (_imopVarPre314) {
/*[]*/
/*[]*/
_imopVarPre315 = d2 == 256;
/*[]*/
/*[]*/
if (_imopVarPre315) {
/*[]*/
/*[]*/
_imopVarPre316 = d3 == 256;
/*[]*/
/*[]*/
if (_imopVarPre316) {
/*[]*/
/*[]*/
_imopVarPre316 = nt == 20;
}
/*[]*/
_imopVarPre315 = _imopVarPre316;
}
/*[]*/
_imopVarPre314 = _imopVarPre315;
}
/*[]*/
/*[]*/
if (_imopVarPre314) {
/*[]*/
/*[]*/
*class = 'B';
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 1; i <= nt; i++) {
/*[]*/
/*[]*/
err = (sums[i].real - vdata_real_b[i]) / vdata_real_b[i];
/*[]*/
double _imopVarPre318;
/*[]*/
_imopVarPre318 = fabs(err);
/*[]*/
/*[]*/
/*[]*/
if (_imopVarPre318 > epsilon) {
/*[]*/
/*[]*/
*verified = 0;
/*[]*/
break;
}
/*[]*/
err = (sums[i].imag - vdata_imag_b[i]) / vdata_imag_b[i];
/*[]*/
double _imopVarPre320;
/*[]*/
_imopVarPre320 = fabs(err);
/*[]*/
/*[]*/
/*[]*/
if (_imopVarPre320 > epsilon) {
/*[]*/
/*[]*/
*verified = 0;
/*[]*/
break;
}
}
} else {
/*[]*/
/*[]*/
int _imopVarPre324;
/*[]*/
int _imopVarPre325;
/*[]*/
int _imopVarPre326;
/*[]*/
_imopVarPre324 = d1 == 512;
/*[]*/
/*[]*/
if (_imopVarPre324) {
/*[]*/
/*[]*/
_imopVarPre325 = d2 == 512;
/*[]*/
/*[]*/
if (_imopVarPre325) {
/*[]*/
/*[]*/
_imopVarPre326 = d3 == 512;
/*[]*/
/*[]*/
if (_imopVarPre326) {
/*[]*/
/*[]*/
_imopVarPre326 = nt == 20;
}
/*[]*/
_imopVarPre325 = _imopVarPre326;
}
/*[]*/
_imopVarPre324 = _imopVarPre325;
}
/*[]*/
/*[]*/
if (_imopVarPre324) {
/*[]*/
/*[]*/
*class = 'C';
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 1; i <= nt; i++) {
/*[]*/
/*[]*/
err = (sums[i].real - vdata_real_c[i]) / vdata_real_c[i];
/*[]*/
double _imopVarPre328;
/*[]*/
_imopVarPre328 = fabs(err);
/*[]*/
/*[]*/
/*[]*/
if (_imopVarPre328 > epsilon) {
/*[]*/
/*[]*/
*verified = 0;
/*[]*/
break;
}
/*[]*/
err = (sums[i].imag - vdata_imag_c[i]) / vdata_imag_c[i];
/*[]*/
double _imopVarPre330;
/*[]*/
_imopVarPre330 = fabs(err);
/*[]*/
/*[]*/
/*[]*/
if (_imopVarPre330 > epsilon) {
/*[]*/
/*[]*/
*verified = 0;
/*[]*/
break;
}
}
}
}
}
}
}
/*[]*/
/*[]*/
if (*class != 'U') {
/*[]*/
/*[]*/
printf("Result verification successful\n");
/*[]*/
} else {
/*[]*/
/*[]*/
printf("Result verification failed\n");
/*[]*/
}
/*[]*/
char _imopVarPre332;
/*[]*/
_imopVarPre332 = *class;
/*[]*/
printf("class = %1c\n", _imopVarPre332);
/*[]*/
}
|
GB_binop__pow_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__pow_fp32
// A.*B function (eWiseMult): GB_AemultB__pow_fp32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__pow_fp32
// C+=b function (dense accum): GB_Cdense_accumb__pow_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_fp32
// C=scalar+B GB_bind1st__pow_fp32
// C=scalar+B' GB_bind1st_tran__pow_fp32
// C=A+scalar GB_bind2nd__pow_fp32
// C=A'+scalar GB_bind2nd_tran__pow_fp32
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = GB_powf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_powf (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_FP32 || GxB_NO_POW_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// GB_Cdense_ewise3_noaccum__pow_fp32: C = A+B where C, A, and B are all dense.
// The numeric kernel lives in the #included template file, which expands using
// the GB_* macros defined earlier in this file (GB_CTYPE, GB_BINOP, ...).
// Returns GrB_NO_VALUE when this operator/type pairing is disabled (GB_DISABLE).
GrB_Info GB_Cdense_ewise3_noaccum__pow_fp32
(
    GrB_Matrix C,           // dense output, written in place
    const GrB_Matrix A,     // first dense input
    const GrB_Matrix B,     // second dense input
    const int nthreads      // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the entire numeric phase is in the template
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// GB_Cdense_accumB__pow_fp32: C += B, accumulating a sparse matrix B into a
// dense matrix C.  The slice arrays describe how B's entries are partitioned
// across the parallel tasks.
GrB_Info GB_Cdense_accumB__pow_fp32
(
    GrB_Matrix C,                               // dense output, updated in place
    const GrB_Matrix B,                         // sparse matrix being accumulated
    const int64_t *GB_RESTRICT kfirst_slice,    // first vector of each task's slice
    const int64_t *GB_RESTRICT klast_slice,     // last vector of each task's slice
    const int64_t *GB_RESTRICT pstart_slice,    // entry offset of each task's slice
    const int ntasks,                           // number of parallel tasks
    const int nthreads                          // number of OpenMP threads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // numeric phase: dense subassign method 23 (C += B)
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// GB_Cdense_accumb__pow_fp32: C += b, accumulating a single scalar b into a
// dense matrix C.
GrB_Info GB_Cdense_accumb__pow_fp32
(
    GrB_Matrix C,               // dense output, updated in place
    const GB_void *p_bwork,     // pointer to the scalar b (type float)
    const int nthreads          // number of OpenMP threads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        // numeric phase: dense subassign method 22 (C += scalar)
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns; kept as generated
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// GB_AaddB__pow_fp32: eWiseAdd, C = A+B or C<M> = A+B with
// cij = GB_powf (aij, bij) applied on the set union of A and B.
// The per-task slice workspaces are allocated inside the template and
// released by GB_FREE_ALL (defined just above).
GrB_Info GB_AaddB__pow_fp32
(
    GrB_Matrix C,                           // output matrix
    const int C_sparsity,                   // sparsity structure of C
    const GrB_Matrix M,                     // optional mask (may be NULL)
    const bool Mask_struct,                 // if true, use the mask structurally
    const bool Mask_comp,                   // if true, complement the mask
    const GrB_Matrix A,                     // first input
    const GrB_Matrix B,                     // second input
    const bool Ch_is_Mh,                    // whether C's hyperlist matches M's
    const int64_t *GB_RESTRICT C_to_M,      // mapping of C vectors to M
    const int64_t *GB_RESTRICT C_to_A,      // mapping of C vectors to A
    const int64_t *GB_RESTRICT C_to_B,      // mapping of C vectors to B
    const GB_task_struct *GB_RESTRICT TaskList,     // parallel task schedule
    const int C_ntasks,                     // number of tasks
    const int C_nthreads,                   // number of OpenMP threads
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-task slice workspaces, allocated inside the template
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// GB_AemultB__pow_fp32: eWiseMult, C = A.*B or C<M> = A.*B with
// cij = GB_powf (aij, bij) applied on the set intersection of A and B.
// The per-task slice workspaces are allocated inside the template and
// released by GB_FREE_ALL (defined just above GB_AaddB).
GrB_Info GB_AemultB__pow_fp32
(
    GrB_Matrix C,                           // output matrix
    const int C_sparsity,                   // sparsity structure of C
    const GrB_Matrix M,                     // optional mask (may be NULL)
    const bool Mask_struct,                 // if true, use the mask structurally
    const bool Mask_comp,                   // if true, complement the mask
    const GrB_Matrix A,                     // first input
    const GrB_Matrix B,                     // second input
    const int64_t *GB_RESTRICT C_to_M,      // mapping of C vectors to M
    const int64_t *GB_RESTRICT C_to_A,      // mapping of C vectors to A
    const int64_t *GB_RESTRICT C_to_B,      // mapping of C vectors to B
    const GB_task_struct *GB_RESTRICT TaskList,     // parallel task schedule
    const int C_ntasks,                     // number of tasks
    const int C_nthreads,                   // number of OpenMP threads
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-task slice workspaces, allocated inside the template
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// GB_bind1st__pow_fp32: Cx = op (x, Bx), applying the binary operator with
// the scalar x bound to the first argument: Cx [p] = GB_powf (x, Bx [p]).
// Cx and Bx may be aliased.  Entries absent from the bitmap Bb are skipped.
GrB_Info GB_bind1st__pow_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // unpack the untyped inputs
    float *Cx = (float *) Cx_output ;
    const float *Bx = (const float *) Bx_input ;
    float x = (*((float *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Bb, k))
        {
            Cx [k] = GB_powf (x, Bx [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// GB_bind2nd__pow_fp32: Cx = op (Ax, y), applying the binary operator with
// the scalar y bound to the second argument: Cx [p] = GB_powf (Ax [p], y).
// Cx and Ax may be aliased.  Entries absent from the bitmap Ab are skipped.
GrB_Info GB_bind2nd__pow_fp32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // unpack the untyped inputs
    float *Cx = (float *) Cx_output ;
    const float *Ax = (const float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, k))
        {
            Cx [k] = GB_powf (Ax [k], y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = GB_powf (x, aij) ; \
}
// GB_bind1st_tran__pow_fp32: C = op (x, A'): transpose A and apply the binary
// operator with the scalar x bound to the first argument, via the GB_CAST_OP
// macro defined just above.  The numeric work is in the included template.
GrB_Info GB_bind1st_tran__pow_fp32
(
    GrB_Matrix C,
    const GB_void *x_input,                 // scalar x (type float)
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,       // transpose workspaces
    const int64_t *GB_RESTRICT A_slice,     // parallel slicing of A
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = GB_powf (aij, y) ; \
}
// GB_bind2nd_tran__pow_fp32: C = op (A', y): transpose A and apply the binary
// operator with the scalar y bound to the second argument, via the GB_CAST_OP
// macro defined just above.  The numeric work is in the included template.
GrB_Info GB_bind2nd_tran__pow_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,                 // scalar y (type float)
    int64_t *GB_RESTRICT *Workspaces,       // transpose workspaces
    const int64_t *GB_RESTRICT A_slice,     // parallel slicing of A
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
triplet_iw.c | /* Copyright (C) 2016 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <math.h>
#include "grgrid.h"
#include "phonoc_utils.h"
#include "triplet.h"
#include "triplet_iw.h"
#include "tetrahedron_method.h"
static void set_freq_vertices(double freq_vertices[3][24][4],
const double *frequencies1,
const double *frequencies2,
const long vertices[2][24][4],
const long num_band1,
const long num_band2,
const long b1,
const long b2,
const long tp_type);
static long set_g(double g[3],
const double f0,
const double freq_vertices[3][24][4],
const long max_i);
static void get_triplet_tetrahedra_vertices(
long vertices[2][24][4],
const long tp_relative_grid_address[2][24][4][3],
const long triplet[3],
const ConstBZGrid *bzgrid);
static void
get_neighboring_grid_points_type1(long *neighboring_grid_points,
const long grid_point,
const long (*relative_grid_address)[3],
const long num_relative_grid_address,
const ConstBZGrid *bzgrid);
static void
get_neighboring_grid_points_type2(long *neighboring_grid_points,
const long grid_point,
const long (*relative_grid_address)[3],
const long num_relative_grid_address,
const ConstBZGrid *bzgrid);
/* Compute tetrahedron-method integration weights iw (and iw_zero flags) for */
/* one triplet, over all (band0, band1, band2) combinations.                 */
/* iw_zero[adrs] = 1 means every weight for that band combination vanishes,  */
/* so the (expensive) interaction-strength computation can be skipped.       */
void tpi_get_integration_weight(double *iw,
                                char *iw_zero,
                                const double *frequency_points,
                                const long num_band0,
                                const long tp_relative_grid_address[2][24][4][3],
                                const long triplets[3],
                                const long num_triplets,
                                const ConstBZGrid *bzgrid,
                                const double *frequencies1,
                                const long num_band1,
                                const double *frequencies2,
                                const long num_band2,
                                const long tp_type,
                                const long openmp_per_bands)
{
  long max_i, j, b1, b2, b12, num_band_prod, adrs_shift;
  long vertices[2][24][4];
  double g[3];
  double freq_vertices[3][24][4];

  get_triplet_tetrahedra_vertices(vertices,
                                  tp_relative_grid_address,
                                  triplets,
                                  bzgrid);
  num_band_prod = num_triplets * num_band0 * num_band1 * num_band2;

  /* tp_type: Type of integration weights stored */
  /*                                             */
  /* g0 -> \delta(f0 - (-f1 + f2))               */
  /* g1 -> \delta(f0 - (f1 - f2))                */
  /* g2 -> \delta(f0 - (f1 + f2))                */
  /*                                             */
  /* tp_type = 2: (g[2], g[0] - g[1]) mainly for ph-ph */
  /* tp_type = 3: (g[2], g[0] - g[1], g[0] + g[1] + g[2]) mainly for ph-ph */
  /* tp_type = 4: (g[0]) mainly for el-ph phonon decay, */
  /*              f0: ph, f1: el_i, f2: el_f */
  if ((tp_type == 2) || (tp_type == 3))
  {
    max_i = 3;
  }
  else if (tp_type == 4)
  {
    max_i = 1;
  }
  else
  {
    /* BUGFIX: max_i was previously left uninitialized for any other       */
    /* tp_type and then used as the loop bound in set_g() (undefined       */
    /* behavior).  Treat unknown types as "no weights": iw is untouched    */
    /* and iw_zero is set to 1.                                            */
    max_i = 0;
  }

#ifdef PHPYOPENMP
#pragma omp parallel for private(j, b1, b2, adrs_shift, g, freq_vertices) if (openmp_per_bands)
#endif
  for (b12 = 0; b12 < num_band1 * num_band2; b12++)
  {
    b1 = b12 / num_band2;
    b2 = b12 % num_band2;
    set_freq_vertices(freq_vertices, frequencies1, frequencies2,
                      vertices, num_band1, num_band2, b1, b2, tp_type);
    for (j = 0; j < num_band0; j++)
    {
      adrs_shift = j * num_band1 * num_band2 + b1 * num_band2 + b2;
      /* set_g returns 1 when g[0..max_i-1] are all zero */
      iw_zero[adrs_shift] = set_g(g, frequency_points[j], freq_vertices, max_i);
      if (tp_type == 2)
      {
        iw[adrs_shift] = g[2];
        adrs_shift += num_band_prod;
        iw[adrs_shift] = g[0] - g[1];
      }
      if (tp_type == 3)
      {
        iw[adrs_shift] = g[2];
        adrs_shift += num_band_prod;
        iw[adrs_shift] = g[0] - g[1];
        adrs_shift += num_band_prod;
        iw[adrs_shift] = g[0] + g[1] + g[2];
      }
      if (tp_type == 4)
      {
        iw[adrs_shift] = g[0];
      }
    }
  }
}
/* Smearing (Gaussian) counterpart of tpi_get_integration_weight: weights    */
/* are Gaussians of width sigma evaluated at the three energy-conservation   */
/* conditions.  When cutoff > 0 and all conditions are farther than cutoff   */
/* from f0, the weights are flushed to zero and iw_zero is set.              */
void tpi_get_integration_weight_with_sigma(double *iw,
                                           char *iw_zero,
                                           const double sigma,
                                           const double cutoff,
                                           const double *frequency_points,
                                           const long num_band0,
                                           const long triplet[3],
                                           const long const_adrs_shift,
                                           const double *frequencies,
                                           const long num_band,
                                           const long tp_type,
                                           const long openmp_per_bands)
{
  long k, bb, b1, b2, adrs_shift;
  double f0, f1, f2, d0, d1, d2;

#ifdef PHPYOPENMP
#pragma omp parallel for private(k, b1, b2, f0, f1, f2, d0, d1, d2, adrs_shift) if (openmp_per_bands)
#endif
  for (bb = 0; bb < num_band * num_band; bb++)
  {
    b1 = bb / num_band;
    b2 = bb % num_band;
    f1 = frequencies[triplet[1] * num_band + b1];
    f2 = frequencies[triplet[2] * num_band + b2];
    for (k = 0; k < num_band0; k++)
    {
      f0 = frequency_points[k];
      adrs_shift = k * num_band * num_band + b1 * num_band + b2;
      if ((tp_type == 2) || (tp_type == 3))
      {
        if ((cutoff > 0) &&
            (fabs(f0 + f1 - f2) > cutoff) &&
            (fabs(f0 - f1 + f2) > cutoff) &&
            (fabs(f0 - f1 - f2) > cutoff))
        {
          /* all three conditions are far from resonance */
          iw_zero[adrs_shift] = 1;
          d0 = 0;
          d1 = 0;
          d2 = 0;
        }
        else
        {
          iw_zero[adrs_shift] = 0;
          d0 = phonoc_gaussian(f0 + f1 - f2, sigma);
          d1 = phonoc_gaussian(f0 - f1 + f2, sigma);
          d2 = phonoc_gaussian(f0 - f1 - f2, sigma);
        }
        /* tp_type 2 and 3 share the first two stored weights */
        iw[adrs_shift] = d2;
        adrs_shift += const_adrs_shift;
        iw[adrs_shift] = d0 - d1;
        if (tp_type == 3)
        {
          adrs_shift += const_adrs_shift;
          iw[adrs_shift] = d0 + d1 + d2;
        }
      }
      else if (tp_type == 4)
      {
        if (cutoff > 0 && fabs(f0 + f1 - f2) > cutoff)
        {
          iw_zero[adrs_shift] = 1;
          iw[adrs_shift] = 0;
        }
        else
        {
          iw_zero[adrs_shift] = 0;
          iw[adrs_shift] = phonoc_gaussian(f0 + f1 - f2, sigma);
        }
      }
    }
  }
}
/* Collect the BZ-grid indices of the points reached from grid_point by each */
/* relative grid address, dispatching on the BZ-grid storage layout (type).  */
void tpi_get_neighboring_grid_points(long *neighboring_grid_points,
                                     const long grid_point,
                                     const long (*relative_grid_address)[3],
                                     const long num_relative_grid_address,
                                     const ConstBZGrid *bzgrid)
{
  if (bzgrid->type == 1)
  {
    get_neighboring_grid_points_type1(neighboring_grid_points,
                                      grid_point,
                                      relative_grid_address,
                                      num_relative_grid_address,
                                      bzgrid);
    return;
  }
  get_neighboring_grid_points_type2(neighboring_grid_points,
                                    grid_point,
                                    relative_grid_address,
                                    num_relative_grid_address,
                                    bzgrid);
}
/* Fill freq_vertices with the frequency combinations needed by set_g at     */
/* every tetrahedron corner: (f2 - f1, f1 - f2, f1 + f2) for ph-ph types     */
/* (2, 3), or just (f2 - f1) otherwise.  For ph-ph, negative (unphysical)    */
/* frequencies are clamped to zero before combining.                         */
static void set_freq_vertices(double freq_vertices[3][24][4],
                              const double *frequencies1,
                              const double *frequencies2,
                              const long vertices[2][24][4],
                              const long num_band1,
                              const long num_band2,
                              const long b1,
                              const long b2,
                              const long tp_type)
{
  long tet, corner;
  double f1, f2;

  for (tet = 0; tet < 24; tet++)
  {
    for (corner = 0; corner < 4; corner++)
    {
      f1 = frequencies1[vertices[0][tet][corner] * num_band1 + b1];
      f2 = frequencies2[vertices[1][tet][corner] * num_band2 + b2];
      if ((tp_type == 2) || (tp_type == 3))
      {
        /* clamp negative phonon frequencies to zero */
        f1 = (f1 < 0) ? 0 : f1;
        f2 = (f2 < 0) ? 0 : f2;
        freq_vertices[0][tet][corner] = f2 - f1;
        freq_vertices[1][tet][corner] = f1 - f2;
        freq_vertices[2][tet][corner] = f1 + f2;
      }
      else
      {
        freq_vertices[0][tet][corner] = f2 - f1;
      }
    }
  }
}
/* Integration weight g is calculated. */
/* iw_zero = 1 means g[0] to g[max_i - 1] are all zero. */
/* max_i depends on what we compute, e.g., ph-ph lifetime, */
/* ph-ph collision matrix, and el-ph relaxation time. */
/* iw_zero is definitely determined by in_tetrahedra in case that */
/* f0 is out of the tetrahedra. */
/* iw_zero=1 information can be used to omit to compute particles */
/* interaction strength that is often heaviest part in throughout */
/* calculation. */
static long set_g(double g[3],
                  const double f0,
                  const double freq_vertices[3][24][4],
                  const long max_i)
{
  long i;
  long all_zero = 1;   /* becomes 0 as soon as any g[i] is nonzero */

  for (i = 0; i < max_i; i++)
  {
    /* f0 outside the tetrahedra: weight is exactly zero */
    if (!thm_in_tetrahedra(f0, freq_vertices[i]))
    {
      g[i] = 0;
      continue;
    }
    g[i] = thm_get_integration_weight(f0, freq_vertices[i], 'I');
    all_zero = 0;
  }
  return all_zero;
}
/* For the 2nd and 3rd members of the triplet, resolve the 4 corners of each */
/* of the 24 tetrahedra into BZ-grid point indices.                          */
static void get_triplet_tetrahedra_vertices(long vertices[2][24][4],
                                            const long tp_relative_grid_address[2][24][4][3],
                                            const long triplet[3],
                                            const ConstBZGrid *bzgrid)
{
  long k, t;

  /* k = 0, 1 selects triplet[1] and triplet[2] respectively */
  for (k = 0; k < 2; k++)
  {
    for (t = 0; t < 24; t++)
    {
      tpi_get_neighboring_grid_points(vertices[k][t],
                                      triplet[k + 1],
                                      tp_relative_grid_address[k][t],
                                      4,
                                      bzgrid);
    }
  }
}
static void
get_neighboring_grid_points_type1(long *neighboring_grid_points,
                                  const long grid_point,
                                  const long (*relative_grid_address)[3],
                                  const long num_relative_grid_address,
                                  const ConstBZGrid *bzgrid)
{
  long bzmesh[3], bz_address[3];
  long i, j, bz_gp, prod_bz_mesh;

  /* type-1 BZ grid is addressed on a doubled mesh */
  prod_bz_mesh = 1;
  for (j = 0; j < 3; j++)
  {
    bzmesh[j] = bzgrid->D_diag[j] * 2;
    prod_bz_mesh *= bzmesh[j];
  }
  for (i = 0; i < num_relative_grid_address; i++)
  {
    for (j = 0; j < 3; j++)
    {
      bz_address[j] =
          bzgrid->addresses[grid_point][j] + relative_grid_address[i][j];
    }
    bz_gp = bzgrid->gp_map[grg_get_grid_index(bz_address, bzmesh)];
    /* a gp_map value of prod_bz_mesh marks "not on the BZ surface": */
    /* fall back to the generalized-regular grid index               */
    neighboring_grid_points[i] =
        (bz_gp == prod_bz_mesh)
            ? grg_get_grid_index(bz_address, bzgrid->D_diag)
            : bz_gp;
  }
}
static void
get_neighboring_grid_points_type2(long *neighboring_grid_points,
                                  const long grid_point,
                                  const long (*relative_grid_address)[3],
                                  const long num_relative_grid_address,
                                  const ConstBZGrid *bzgrid)
{
  long bz_address[3];
  long i, j, gp, first, last;

  for (i = 0; i < num_relative_grid_address; i++)
  {
    for (j = 0; j < 3; j++)
    {
      bz_address[j] =
          bzgrid->addresses[grid_point][j] + relative_grid_address[i][j];
    }
    gp = grg_get_grid_index(bz_address, bzgrid->D_diag);
    first = bzgrid->gp_map[gp];
    last = bzgrid->gp_map[gp + 1];
    neighboring_grid_points[i] = first;
    /* several BZ images may map to this GR point: pick the one whose */
    /* address matches exactly                                        */
    if (last - first > 1)
    {
      for (j = first; j < last; j++)
      {
        if ((bzgrid->addresses[j][0] == bz_address[0]) &&
            (bzgrid->addresses[j][1] == bz_address[1]) &&
            (bzgrid->addresses[j][2] == bz_address[2]))
        {
          neighboring_grid_points[i] = j;
          break;
        }
      }
    }
  }
}
|
csr_matvec.c | /*BHEADER**********************************************************************
* Copyright (c) 2006 The Regents of the University of California.
* Produced at the Lawrence Livermore National Laboratory.
* Written by the HYPRE team. UCRL-CODE-222953.
* All rights reserved.
*
* This file is part of HYPRE (see http://www.llnl.gov/CASC/hypre/).
* Please see the COPYRIGHT_and_LICENSE file for the copyright notice,
* disclaimer, contact information and the GNU Lesser General Public License.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU General Public License (as published by the Free Software
* Foundation) version 2.1 dated February 1999.
*
* HYPRE is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the terms and conditions of the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* $Revision: 2.10 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "headers.h"
#include <assert.h>
#include "omp.h"
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
/* y <- alpha * A * x + beta * y for a CSR matrix A.
 * Returns ierr = 1 if length(x) != num_cols, 2 if length(y) != num_rows,
 * 3 if both; these are informational only and never abort the product,
 * because temporary vectors are often used in Matvec. */
int
hypre_CSRMatrixMatvec( double alpha,
                       hypre_CSRMatrix *A,
                       hypre_Vector *x,
                       double beta,
                       hypre_Vector *y )
{
   double *A_data = hypre_CSRMatrixData(A);
   int *A_i = hypre_CSRMatrixI(A);
   int *A_j = hypre_CSRMatrixJ(A);
   int num_rows = hypre_CSRMatrixNumRows(A);
   int num_cols = hypre_CSRMatrixNumCols(A);
   int *A_rownnz = hypre_CSRMatrixRownnz(A);
   int num_rownnz = hypre_CSRMatrixNumRownnz(A);
   double *x_data = hypre_VectorData(x);
   double *y_data = hypre_VectorData(y);
   int x_size = hypre_VectorSize(x);
   int y_size = hypre_VectorSize(y);
   int num_vectors = hypre_VectorNumVectors(x);
   int idxstride_y = hypre_VectorIndexStride(y);
   int vecstride_y = hypre_VectorVectorStride(y);
   int idxstride_x = hypre_VectorIndexStride(x);
   int vecstride_x = hypre_VectorVectorStride(x);
   double temp, tempx;
   int i, j, jj;
   int m;
   double xpar = 0.7; /* threshold: rownnz path pays off when most rows are empty */
   int ierr = 0;

   /*---------------------------------------------------------------------
    * Check for size compatibility.  Matvec returns ierr = 1 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 2 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 3 if both are true.
    *--------------------------------------------------------------------*/
   //__WHATIF__BEGIN__
   hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
   if (num_cols != x_size)
      ierr = 1;
   if (num_rows != y_size)
      ierr = 2;
   if (num_cols != x_size && num_rows != y_size)
      ierr = 3;

   /*-----------------------------------------------------------------------
    * alpha == 0: only y = beta*y remains - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
      for (i = 0; i < num_rows*num_vectors; i++)
         y_data[i] *= beta;
      return ierr;
   }

   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y, so the single final scaling by alpha
    * also applies beta to the original y.
    *-----------------------------------------------------------------------*/
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
         for (i = 0; i < num_rows*num_vectors; i++)
            y_data[i] = 0.0;
      }
      else
      {
         for (i = 0; i < num_rows*num_vectors; i++)
            y_data[i] *= temp;
      }
   }
   //__WHATIF__END__

   /*-----------------------------------------------------------------
    * y += A*x
    *-----------------------------------------------------------------*/
   /* use the rownnz pointer when the fraction of nonempty rows is small */
   if (num_rownnz < xpar*(num_rows))
   {
      for (i = 0; i < num_rownnz; i++)
      {
         m = A_rownnz[i];
         if ( num_vectors==1 )
         {
            tempx = y_data[m];
            for (jj = A_i[m]; jj < A_i[m+1]; jj++)
               tempx += A_data[jj] * x_data[A_j[jj]];
            y_data[m] = tempx;
         }
         else
            for ( j=0; j<num_vectors; ++j )
            {
               tempx = y_data[ j*vecstride_y + m*idxstride_y ];
               for (jj = A_i[m]; jj < A_i[m+1]; jj++)
                  tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
               y_data[ j*vecstride_y + m*idxstride_y ] = tempx;
            }
      }
   }
   else
   {
      /* BUGFIX: 'j' added to the private clause.  It is declared at
       * function scope, so it was implicitly shared before, which is a
       * data race in the multivector (num_vectors > 1) branch. */
#pragma omp parallel for private(i,j,jj,temp) schedule(static)
      for (i = 0; i < num_rows; i++)
      {
         if ( num_vectors==1 )
         {
            temp = y_data[i];
            for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               temp += A_data[jj] * x_data[A_j[jj]];
            y_data[i] = temp;
         }
         else
            for ( j=0; j<num_vectors; ++j )
            {
               temp = y_data[ j*vecstride_y + i*idxstride_y ];
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               {
                  temp += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
               }
               y_data[ j*vecstride_y + i*idxstride_y ] = temp;
            }
      }
   }

   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
      for (i = 0; i < num_rows*num_vectors; i++)
         y_data[i] *= alpha;
   }

   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvecT
*
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvecT( double alpha,
                        hypre_CSRMatrix *A,
                        hypre_Vector *x,
                        double beta,
                        hypre_Vector *y )
{
   /* Raw CSR storage of A and dense storage of x and y. */
   double *A_data = hypre_CSRMatrixData(A);
   int *A_i = hypre_CSRMatrixI(A);
   int *A_j = hypre_CSRMatrixJ(A);
   int num_rows = hypre_CSRMatrixNumRows(A);
   int num_cols = hypre_CSRMatrixNumCols(A);
   double *x_data = hypre_VectorData(x);
   double *y_data = hypre_VectorData(y);
   int x_size = hypre_VectorSize(x);
   int y_size = hypre_VectorSize(y);
   /* Multivector layout: component jv of entry i is addressed as
      jv*vecstride + i*idxstride (see the indexing below). */
   int num_vectors = hypre_VectorNumVectors(x);
   int idxstride_y = hypre_VectorIndexStride(y);
   int vecstride_y = hypre_VectorVectorStride(y);
   int idxstride_x = hypre_VectorIndexStride(x);
   int vecstride_x = hypre_VectorVectorStride(x);
   double temp;
   int i, i1, j, jv, jj, ns, ne, size, rest;
   int num_threads;
   int ierr = 0;
   /*---------------------------------------------------------------------
    * Check for size compatibility.  MatvecT returns ierr = 1 if
    * length of X doesn't equal the number of rows of A,
    * ierr = 2 if the length of Y doesn't equal the number of
    * columns of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in MatvecT, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/
   hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
   if (num_rows != x_size)
      ierr = 1;
   if (num_cols != y_size)
      ierr = 2;
   if (num_rows != x_size && num_cols != y_size)
      ierr = 3;
   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   /* alpha == 0 reduces the whole operation to y <- beta*y. */
   if (alpha == 0.0)
   {
      for (i = 0; i < num_cols*num_vectors; i++)
         y_data[i] *= beta;
      return ierr;
   }
   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y
    *-----------------------------------------------------------------------*/
   /* Pre-scale y by beta/alpha so a single multiply by alpha at the end
      produces alpha*A^T*x + beta*y. */
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
         /* Assign 0.0 rather than multiplying, so stale Inf/NaN values
            in y are not propagated. */
         for (i = 0; i < num_cols*num_vectors; i++)
            y_data[i] = 0.0;
      }
      else
      {
         for (i = 0; i < num_cols*num_vectors; i++)
            y_data[i] *= temp;
      }
   }
   /*-----------------------------------------------------------------
    * y += A^T*x
    *-----------------------------------------------------------------*/
   num_threads = hypre_NumThreads();
   if (num_threads > 1)
   {
      /* Partition the columns of A into num_threads contiguous slices.
         Slice i1 owns the open interval (ns, ne), i.e. columns
         ns+1 .. ne-1; the first `rest' slices get size+1 columns, the
         remainder get size, which tiles 0 .. num_cols-1 exactly.
         NOTE(review): there is no OpenMP pragma on this i1 loop, so as
         written the slices run sequentially -- presumably this body was
         meant to execute inside a parallel construct; confirm against
         upstream hypre. */
      for (i1 = 0; i1 < num_threads; i1++)
      {
         size = num_cols/num_threads;
         rest = num_cols - size*num_threads;
         if (i1 < rest)
         {
            ns = i1*size+i1-1;
            ne = (i1+1)*size+i1+1;
         }
         else
         {
            ns = i1*size+rest-1;
            ne = (i1+1)*size+rest;
         }
         if ( num_vectors==1 )
         {
            /* Scatter each row's entries, keeping only the columns
               owned by this slice. */
            for (i = 0; i < num_rows; i++)
            {
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               {
                  j = A_j[jj];
                  if (j > ns && j < ne)
                     y_data[j] += A_data[jj] * x_data[i];
               }
            }
         }
         else
         {
            for (i = 0; i < num_rows; i++)
            {
               for ( jv=0; jv<num_vectors; ++jv )
               {
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     j = A_j[jj];
                     if (j > ns && j < ne)
                        y_data[ j*idxstride_y + jv*vecstride_y ] +=
                           A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x];
                  }
               }
            }
         }
      }
   }
   else
   {
      /* Single-threaded path: plain scatter of every entry. */
      for (i = 0; i < num_rows; i++)
      {
         if ( num_vectors==1 )
         {
            for (jj = A_i[i]; jj < A_i[i+1]; jj++)
            {
               j = A_j[jj];
               y_data[j] += A_data[jj] * x_data[i];
            }
         }
         else
         {
            for ( jv=0; jv<num_vectors; ++jv )
            {
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               {
                  j = A_j[jj];
                  y_data[ j*idxstride_y + jv*vecstride_y ] +=
                     A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x ];
               }
            }
         }
      }
   }
   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   /* Undo the earlier division by alpha. */
   if (alpha != 1.0)
   {
      for (i = 0; i < num_cols*num_vectors; i++)
         y_data[i] *= alpha;
   }
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec_FF
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvec_FF( double alpha,
                          hypre_CSRMatrix *A,
                          hypre_Vector *x,
                          double beta,
                          hypre_Vector *y,
                          int *CF_marker_x,
                          int *CF_marker_y,
                          int fpt )
{
   /* CSR arrays of A and the dense storage of x and y. */
   double *A_data = hypre_CSRMatrixData(A);
   int    *A_i    = hypre_CSRMatrixI(A);
   int    *A_j    = hypre_CSRMatrixJ(A);
   int     num_rows = hypre_CSRMatrixNumRows(A);
   int     num_cols = hypre_CSRMatrixNumCols(A);
   double *x_data = hypre_VectorData(x);
   double *y_data = hypre_VectorData(y);
   int     x_size = hypre_VectorSize(x);
   int     y_size = hypre_VectorSize(y);
   double  scale;
   double  acc;
   int     row, p;
   int     ierr = 0;

   /* Informational size checks; processing continues regardless:
      ierr = 1 -> length of x does not match the columns of A,
      ierr = 2 -> length of y does not match the rows of A,
      ierr = 3 -> both. */
   if (num_cols != x_size)
      ierr = 1;
   if (num_rows != y_size)
      ierr = 2;
   if (num_cols != x_size && num_rows != y_size)
      ierr = 3;

   /* alpha == 0 collapses the operation to y <- beta*y on rows whose
      marker equals fpt. */
   if (alpha == 0.0)
   {
      for (row = 0; row < num_rows; row++)
         if (CF_marker_x[row] == fpt)
            y_data[row] *= beta;
      return ierr;
   }

   /* Pre-scale the marked rows of y by beta/alpha, so one final
      multiplication by alpha yields alpha*A*x + beta*y.  When the ratio
      is zero, assign 0.0 outright instead of multiplying. */
   scale = beta / alpha;
   if (scale == 0.0)
   {
      for (row = 0; row < num_rows; row++)
         if (CF_marker_x[row] == fpt)
            y_data[row] = 0.0;
   }
   else if (scale != 1.0)
   {
      for (row = 0; row < num_rows; row++)
         if (CF_marker_x[row] == fpt)
            y_data[row] *= scale;
   }

   /* y += A*x, restricted to rows and columns whose marker equals fpt. */
   for (row = 0; row < num_rows; row++)
   {
      if (CF_marker_x[row] != fpt)
         continue;
      acc = y_data[row];
      for (p = A_i[row]; p < A_i[row+1]; p++)
         if (CF_marker_y[A_j[p]] == fpt)
            acc += A_data[p] * x_data[A_j[p]];
      y_data[row] = acc;
   }

   /* Undo the earlier division by alpha. */
   if (alpha != 1.0)
   {
      for (row = 0; row < num_rows; row++)
         if (CF_marker_x[row] == fpt)
            y_data[row] *= alpha;
   }
   return ierr;
}
|
GB_unaryop__abs_int8_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int8_int32
// op(A') function: GB_tran__abs_int8_int32
// C type: int8_t
// A type: int32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IABS (aij)
// type of A's entries as seen by the kernel
#define GB_ATYPE \
    int32_t

// type of C's entries as seen by the kernel
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int32_t aij = Ax [pA]

// address of the p-th entry in C's value array
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IABS ((int8_t) Ax [p]) for all p, in parallel.  This is the
// GB_CAST_OP macro written out explicitly: fetch, cast to int8_t, apply abs.
GrB_Info GB_unop__abs_int8_int32
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        int32_t aij = Ax [k] ;
        int8_t z = (int8_t) aij ;
        Cx [k] = GB_IABS (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_int8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The kernel body is shared by all generated unary ops: the textual
    // include below expands GB_unaryop_transpose.c using the GB_* macros
    // defined earlier in this file.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
sortc.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
/* An element to be sorted: the sort key (val) plus the element's original
   1-based position (index), so the permutation survives the sort. */
typedef struct {
    float val;
    int index;
} THEFIT;
/* Merge scratch buffer (work) and 1-based view of the array being sorted
   (a), shared by Sort/RecMergeSort/Merge.  Declared threadprivate so that
   concurrent OpenMP sections can each run their own sort. */
THEFIT *work;
THEFIT *a;
#pragma omp threadprivate (work,a)
void RecMergeSort(int left, int right);
void Sort(THEFIT *Ain, int n);
void Merge(int s, int n, int m);
void merge2(THEFIT *d1,int n,THEFIT *d2,int m,THEFIT *out);
THEFIT *vector(int nl, int nh);
void free_vector(THEFIT *v, int nl);
/* Demo driver: fill 32 random elements, sort the two halves (intended to
   run concurrently), then merge them and print the result.  Returns 0. */
int main() {
    THEFIT *data, *output;
    int i, j, k, k1, k2;
    printf("sort in c\n");
    i = 32;                       /* number of elements */
    data = vector(1, i);
    for (j = 1; j <= i; j++) {
        data[j].index = j;
        data[j].val = (float)rand()/(float)RAND_MAX;
        printf("%d %g\n", data[j].index, data[j].val);
    }
    printf("\n\n");
    k = i/2;                      /* size of first half */
    k1 = k+1;                     /* start of second half */
    k2 = (i-k1)+1;                /* size of second half */
    /* BUG FIX: a bare "#pragma omp sections" outside a parallel region is
       an orphaned construct and runs on one thread; the threadprivate
       work/a globals show the two halves were meant to be sorted
       concurrently, so use "parallel sections". */
    #pragma omp parallel sections
    {
        #pragma omp section
        Sort(&data[1], k);
        #pragma omp section
        Sort(&data[k1], k2);
    }
    for (j = 1; j <= k; j++) {
        printf("%d %g\n", data[j].index, data[j].val);
    }
    printf("\n\n");
    printf("\n\n");
    for (j = k1; j <= i; j++) {
        printf("%d %g\n", data[j].index, data[j].val);
    }
    printf("\n\n");
    printf("\n\n");
    output = vector(1, i);
    merge2(&data[1], k, &data[k1], k2, &output[1]);
    for (j = 1; j <= i; j++) {
        printf("%2d %10.7f\n", output[j].index, output[j].val);
    }
    /* BUG FIX: release both vectors (previously leaked); also dropped the
       unused locals k3 and k4. */
    free_vector(output, 1);
    free_vector(data, 1);
    return 0;
}
/* Sort Ain[0..n-1] into descending order by val using the recursive
   merge sort; allocates and frees its own scratch buffer. */
void Sort(THEFIT *Ain, int n)
{
    a = Ain - 1;            /* expose the data 1-based to RecMergeSort */
    work = vector(1, n);    /* scratch space used by Merge */
    RecMergeSort(1, n);
    free_vector(work, 1);
}
/* Recursively merge-sort a[lo..hi] (inclusive, 1-based). */
void RecMergeSort(int lo, int hi)
{
    int mid;
    if (lo >= hi)
        return;             /* zero or one element: already sorted */
    mid = (lo + hi) / 2;
    RecMergeSort(lo, mid);
    RecMergeSort(mid + 1, hi);
    Merge(lo, mid - lo + 1, hi - mid);
}
/* Merge the adjacent runs a[s..s+n-1] and a[s+n..s+n+m-1], each already in
   descending order by val, back into a[s..s+n+m-1] via the work buffer. */
void Merge(int s, int n, int m)
{
    int i = s;              /* cursor in the left run  */
    int j = s + n;          /* cursor in the right run */
    int t = s + n;          /* one past the left run   */
    int u = s + n + m;      /* one past the right run  */
    int k = 1;              /* cursor in work[]        */
    int w;
    /* Take the larger head element first (>= keeps the left run's
       element on ties). */
    while (i < t && j < u) {
        if (a[i].val >= a[j].val)
            work[k++] = a[i++];
        else
            work[k++] = a[j++];
    }
    /* Drain whichever run is left over. */
    while (i < t)
        work[k++] = a[i++];
    while (j < u)
        work[k++] = a[j++];
    /* Copy the merged run back in place. */
    for (w = 1; w < k; w++)
        a[s + w - 1] = work[w];
}
/*
! this subroutine takes two sorted lists of type(THEFIT) and merges them
! input d1(1:n) , d2(1:m)
! output out(1:n+m)
*/
/* Merge two arrays d1[0..n-1] and d2[0..m-1], each already sorted in
   descending order by val, into out[0..n+m-1] (all viewed 1-based
   internally, matching the rest of this file). */
void merge2(THEFIT *d1, int n, THEFIT *d2, int m, THEFIT *out)
{
    int i = 1;              /* cursor in d1 */
    int j = 1;              /* cursor in d2 */
    int k;
    d1--;                   /* shift to 1-based indexing */
    d2--;
    out--;
    for (k = 1; k <= n + m; k++) {
        if (i > n)
            out[k] = d2[j++];           /* d1 exhausted */
        else if (j > m)
            out[k] = d1[i++];           /* d2 exhausted */
        else if (d1[i].val > d2[j].val)
            out[k] = d1[i++];
        else
            out[k] = d2[j++];           /* ties favor d2, as before */
    }
}
/* Allocate a THEFIT array indexable as v[nl..nh]; exits on failure.
   BUG FIX: the failure message previously named "ivector()" (a
   Numerical-Recipes copy-paste) and went to stdout; it now names this
   function and goes to stderr.  The element count is also computed in
   size_t instead of being truncated through (unsigned). */
THEFIT *vector(int nl, int nh)
{
    THEFIT *v;
    v = (THEFIT *) malloc((size_t)(nh - nl + 1) * sizeof(THEFIT));
    if (!v) {
        fprintf(stderr, "allocation failure in vector()\n");
        exit(1);
    }
    /* Offset so callers can index from nl (freed via free_vector). */
    return v - nl;
}
/* Release storage obtained from vector(); nl must match the nl passed
   there so the original malloc pointer is reconstructed.
   Cleanup: dropped the obsolete (char*) cast -- free() takes void*. */
void free_vector(THEFIT *v, int nl)
{
    free(v + nl);
}
|
c-parser.c | /* Parser for C and Objective-C.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
Parser actions based on the old Bison parser; structure somewhat
influenced by and fragments based on the C++ parser.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* TODO:
Make sure all relevant comments, and all relevant code from all
actions, brought over from old parser. Verify exact correspondence
of syntax accepted.
Add testcases covering every input symbol in every state in old and
new parsers.
Include full syntax for GNU C, including erroneous cases accepted
with error messages, in syntax productions in comments.
Make more diagnostics in the front end generally take an explicit
location rather than implicitly using input_location. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h" /* For rtl.h: needs enum reg_class. */
#include "tree.h"
#include "langhooks.h"
#include "input.h"
#include "cpplib.h"
#include "timevar.h"
#include "c-family/c-pragma.h"
#include "c-tree.h"
#include "flags.h"
#include "output.h"
#include "ggc.h"
#include "c-family/c-common.h"
#include "c-family/c-objc.h"
#include "vec.h"
#include "target.h"
#include "cgraph.h"
#include "plugin.h"
/* Initialization routine for this file. */
void
c_parse_init (void)
{
  /* The only initialization required is of the reserved word
     identifiers.  */
  unsigned int i;
  tree id;
  int mask = 0;

  /* Make sure RID_MAX hasn't grown past the 8 bits used to hold the keyword in
     the c_token structure.  */
  gcc_assert (RID_MAX <= 255);

  /* Build the mask of keyword classes that are *disabled* under the
     current dialect options; this is the C parser, so C++-only words
     are always disabled.  */
  mask |= D_CXXONLY;
  if (!flag_isoc99)
    mask |= D_C99;
  if (flag_no_asm)
    {
      mask |= D_ASM | D_EXT;
      if (!flag_isoc99)
	mask |= D_EXT89;
    }
  if (!c_dialect_objc ())
    mask |= D_OBJC | D_CXX_OBJC;

  ridpointers = ggc_alloc_cleared_vec_tree ((int) RID_MAX);
  for (i = 0; i < num_c_common_reswords; i++)
    {
      /* If a keyword is disabled, do not enter it into the table
	 and so create a canonical spelling that isn't a keyword.  */
      if (c_common_reswords[i].disable & mask)
	{
	  if (warn_cxx_compat
	      && (c_common_reswords[i].disable & D_CXXWARN))
	    {
	      /* Still intern C++ keywords, tagged so -Wc++-compat can
		 warn when they are used as identifiers.  */
	      id = get_identifier (c_common_reswords[i].word);
	      C_SET_RID_CODE (id, RID_CXX_COMPAT_WARN);
	      C_IS_RESERVED_WORD (id) = 1;
	    }
	  continue;
	}

      id = get_identifier (c_common_reswords[i].word);
      C_SET_RID_CODE (id, c_common_reswords[i].rid);
      C_IS_RESERVED_WORD (id) = 1;
      ridpointers [(int) c_common_reswords[i].rid] = id;
    }
}
/* The C lexer intermediates between the lexer in cpplib and c-lex.c
and the C parser. Unlike the C++ lexer, the parser structure
stores the lexer information instead of using a separate structure.
Identifiers are separated into ordinary identifiers, type names,
keywords and some other Objective-C types of identifiers, and some
look-ahead is maintained.
??? It might be a good idea to lex the whole file up front (as for
C++). It would then be possible to share more of the C and C++
lexer code, if desired. */
/* The following local token type is used.  */

/* A keyword.  */
#define CPP_KEYWORD ((enum cpp_ttype) (N_TTYPES + 1))

/* More information about the type of a CPP_NAME token.  */
typedef enum c_id_kind {
  /* An ordinary identifier.  */
  C_ID_ID,
  /* An identifier declared as a typedef name.  */
  C_ID_TYPENAME,
  /* An identifier declared as an Objective-C class name.  */
  C_ID_CLASSNAME,
  /* An address space identifier.  */
  C_ID_ADDRSPACE,
  /* Not an identifier.  */
  C_ID_NONE
} c_id_kind;

/* A single C token after string literal concatenation and conversion
   of preprocessing tokens to tokens.  The four discriminators are
   packed into 8-bit bitfields to keep the token small; the keyword
   field's width is what the RID_MAX <= 255 assertion in c_parse_init
   guards.  */
typedef struct GTY (()) c_token {
  /* The kind of token.  */
  ENUM_BITFIELD (cpp_ttype, type, 8);
  /* If this token is a CPP_NAME, this value indicates whether also
     declared as some kind of type.  Otherwise, it is C_ID_NONE.  */
  ENUM_BITFIELD (c_id_kind, id_kind, 8);
  /* If this token is a keyword, this value indicates which keyword.
     Otherwise, this value is RID_MAX.  */
  ENUM_BITFIELD (rid, keyword, 8);
  /* If this token is a CPP_PRAGMA, this indicates the pragma that
     was seen.  Otherwise it is PRAGMA_NONE.  */
  ENUM_BITFIELD (pragma_kind, pragma_kind, 8);
  /* The location at which this token was found.  */
  location_t location;
  /* The value associated with this token, if any.  */
  tree value;
} c_token;
/* A parser structure recording information about the state and
   context of parsing.  Includes lexer information with up to two
   tokens of look-ahead; more are not needed for C.  */
typedef struct GTY(()) c_parser {
  /* The look-ahead tokens.  */
  c_token tokens[2];
  /* How many look-ahead tokens are available (0, 1 or 2).  */
  short tokens_avail;
  /* True if a syntax error is being recovered from; false otherwise.
     c_parser_error sets this flag.  It should clear this flag when
     enough tokens have been consumed to recover from the error.  */
  BOOL_BITFIELD error : 1;
  /* True if we're processing a pragma, and shouldn't automatically
     consume CPP_PRAGMA_EOL.  */
  BOOL_BITFIELD in_pragma : 1;
  /* True if we're parsing the outermost block of an if statement.  */
  BOOL_BITFIELD in_if_block : 1;
  /* True if we want to lex an untranslated string.  */
  BOOL_BITFIELD lex_untranslated_string : 1;

  /* Objective-C specific parser/lexer information.  */

  /* True if we are in a context where the Objective-C "PQ" keywords
     are considered keywords.  */
  BOOL_BITFIELD objc_pq_context : 1;
  /* True if we are parsing a (potential) Objective-C foreach
     statement.  This is set to true after we parsed 'for (' and while
     we wait for 'in' or ';' to decide if it's a standard C for loop or an
     Objective-C foreach loop.  */
  BOOL_BITFIELD objc_could_be_foreach_context : 1;
  /* The following flag is needed to contextualize Objective-C lexical
     analysis.  In some cases (e.g., 'int NSObject;'), it is
     undesirable to bind an identifier to an Objective-C class, even
     if a class with that name exists.  */
  BOOL_BITFIELD objc_need_raw_identifier : 1;
  /* True if we are in a context where the Objective-C "Property attribute"
     keywords are valid.  */
  BOOL_BITFIELD objc_property_attr_context : 1;
} c_parser;

/* The actual parser and external interface.  ??? Does this need to be
   garbage-collected?  */
static GTY (()) c_parser *the_parser;
/* Read in and lex a single token, storing it in *TOKEN. */
static void
c_lex_one_token (c_parser *parser, c_token *token)
{
  timevar_push (TV_LEX);

  /* Fetch the next preprocessing token, honoring the parser's
     "untranslated string" mode.  */
  token->type = c_lex_with_flags (&token->value, &token->location, NULL,
				  (parser->lex_untranslated_string
				   ? C_LEX_STRING_NO_TRANSLATE : 0));
  token->id_kind = C_ID_NONE;
  token->keyword = RID_MAX;
  token->pragma_kind = PRAGMA_NONE;
  switch (token->type)
    {
    case CPP_NAME:
      {
	tree decl;

	/* Remember, then reset, the "raw identifier" request: it
	   applies only to the single identifier being lexed now.  */
	bool objc_force_identifier = parser->objc_need_raw_identifier;
	if (c_dialect_objc ())
	  parser->objc_need_raw_identifier = false;

	if (C_IS_RESERVED_WORD (token->value))
	  {
	    enum rid rid_code = C_RID_CODE (token->value);

	    if (rid_code == RID_CXX_COMPAT_WARN)
	      {
		warning_at (token->location,
			    OPT_Wc___compat,
			    "identifier %qE conflicts with C++ keyword",
			    token->value);
	      }
	    else if (rid_code >= RID_FIRST_ADDR_SPACE
		     && rid_code <= RID_LAST_ADDR_SPACE)
	      {
		token->id_kind = C_ID_ADDRSPACE;
		token->keyword = rid_code;
		break;
	      }
	    else if (c_dialect_objc () && OBJC_IS_PQ_KEYWORD (rid_code))
	      {
		/* We found an Objective-C "pq" keyword (in, out,
		   inout, bycopy, byref, oneway).  They need special
		   care because the interpretation depends on the
		   context.  */
		if (parser->objc_pq_context)
		  {
		    token->type = CPP_KEYWORD;
		    token->keyword = rid_code;
		    break;
		  }
		else if (parser->objc_could_be_foreach_context
			 && rid_code == RID_IN)
		  {
		    /* We are in Objective-C, inside a (potential)
		       foreach context (which means after having
		       parsed 'for (', but before having parsed ';'),
		       and we found 'in'.  We consider it the keyword
		       which terminates the declaration at the
		       beginning of a foreach-statement.  Note that
		       this means you can't use 'in' for anything else
		       in that context; in particular, in Objective-C
		       you can't use 'in' as the name of the running
		       variable in a C for loop.  We could potentially
		       try to add code here to disambiguate, but it
		       seems a reasonable limitation.  */
		    token->type = CPP_KEYWORD;
		    token->keyword = rid_code;
		    break;
		  }
		/* Else, "pq" keywords outside of the "pq" context are
		   not keywords, and we fall through to the code for
		   normal tokens.  */
	      }
	    else if (c_dialect_objc () && OBJC_IS_PATTR_KEYWORD (rid_code))
	      {
		/* We found an Objective-C "property attribute"
		   keyword (getter, setter, readonly, etc).  These are
		   only valid in the property context.  */
		if (parser->objc_property_attr_context)
		  {
		    token->type = CPP_KEYWORD;
		    token->keyword = rid_code;
		    break;
		  }
		/* Else they are not special keywords.
		*/
	      }
	    else if (c_dialect_objc ()
		     && (OBJC_IS_AT_KEYWORD (rid_code)
			 || OBJC_IS_CXX_KEYWORD (rid_code)))
	      {
		/* We found one of the Objective-C "@" keywords (defs,
		   selector, synchronized, etc) or one of the
		   Objective-C "cxx" keywords (class, private,
		   protected, public, try, catch, throw) without a
		   preceding '@' sign.  Do nothing and fall through to
		   the code for normal tokens (in C++ we would still
		   consider the CXX ones keywords, but not in C).  */
		;
	      }
	    else
	      {
		/* An ordinary keyword.  */
		token->type = CPP_KEYWORD;
		token->keyword = rid_code;
		break;
	      }
	  }

	/* Not (treated as) a keyword: classify the identifier.  */
	decl = lookup_name (token->value);
	if (decl)
	  {
	    if (TREE_CODE (decl) == TYPE_DECL)
	      {
		token->id_kind = C_ID_TYPENAME;
		break;
	      }
	  }
	else if (c_dialect_objc ())
	  {
	    tree objc_interface_decl = objc_is_class_name (token->value);
	    /* Objective-C class names are in the same namespace as
	       variables and typedefs, and hence are shadowed by local
	       declarations.  */
	    if (objc_interface_decl
                && (global_bindings_p ()
                    || (!objc_force_identifier && !decl)))
	      {
		token->value = objc_interface_decl;
		token->id_kind = C_ID_CLASSNAME;
		break;
	      }
	  }
        token->id_kind = C_ID_ID;
      }
      break;
    case CPP_AT_NAME:
      /* This only happens in Objective-C; it must be a keyword.  */
      token->type = CPP_KEYWORD;
      switch (C_RID_CODE (token->value))
	{
	  /* Replace 'class' with '@class', 'private' with '@private',
	     etc.  This prevents confusion with the C++ keyword
	     'class', and makes the tokens consistent with other
	     Objective-C 'AT' keywords.  For example '@class' is
	     reported as RID_AT_CLASS which is consistent with
	     '@synchronized', which is reported as
	     RID_AT_SYNCHRONIZED.
	  */
	case RID_CLASS:     token->keyword = RID_AT_CLASS; break;
	case RID_PRIVATE:   token->keyword = RID_AT_PRIVATE; break;
	case RID_PROTECTED: token->keyword = RID_AT_PROTECTED; break;
	case RID_PUBLIC:    token->keyword = RID_AT_PUBLIC; break;
	case RID_THROW:     token->keyword = RID_AT_THROW; break;
	case RID_TRY:       token->keyword = RID_AT_TRY; break;
	case RID_CATCH:     token->keyword = RID_AT_CATCH; break;
	default:            token->keyword = C_RID_CODE (token->value);
	}
      break;
    case CPP_COLON:
    case CPP_COMMA:
    case CPP_CLOSE_PAREN:
    case CPP_SEMICOLON:
      /* These tokens may affect the interpretation of any identifiers
	 following, if doing Objective-C.  */
      if (c_dialect_objc ())
	parser->objc_need_raw_identifier = false;
      break;
    case CPP_PRAGMA:
      /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST.  */
      token->pragma_kind = (enum pragma_kind) TREE_INT_CST_LOW (token->value);
      token->value = NULL;
      break;
    default:
      break;
    }
  timevar_pop (TV_LEX);
}
/* Return a pointer to the next token from PARSER, reading it in if
necessary. */
/* Return a pointer to the next token from PARSER, lexing it first if the
   look-ahead buffer is empty.  */
static inline c_token *
c_parser_peek_token (c_parser *parser)
{
  if (parser->tokens_avail != 0)
    return &parser->tokens[0];
  c_lex_one_token (parser, &parser->tokens[0]);
  parser->tokens_avail = 1;
  return &parser->tokens[0];
}
/* Return true if the next token from PARSER has the indicated
TYPE. */
/* Return true if the next token from PARSER has the indicated TYPE.  */
static inline bool
c_parser_next_token_is (c_parser *parser, enum cpp_ttype type)
{
  c_token *tok = c_parser_peek_token (parser);
  return tok->type == type;
}
/* Return true if the next token from PARSER does not have the
indicated TYPE. */
/* Return true if the next token from PARSER does not have the
   indicated TYPE.  */
static inline bool
c_parser_next_token_is_not (c_parser *parser, enum cpp_ttype type)
{
  return c_parser_peek_token (parser)->type != type;
}
/* Return true if the next token from PARSER is the indicated
KEYWORD. */
/* Return true if the next token from PARSER is the indicated KEYWORD.  */
static inline bool
c_parser_next_token_is_keyword (c_parser *parser, enum rid keyword)
{
  c_token *tok = c_parser_peek_token (parser);
  return tok->keyword == keyword;
}
/* Return a pointer to the next-but-one token from PARSER, reading it
in if necessary. The next token is already read in. */
/* Return a pointer to the next-but-one token from PARSER, lexing it in
   if necessary.  The next token must already have been read.  */
static c_token *
c_parser_peek_2nd_token (c_parser *parser)
{
  if (parser->tokens_avail < 2)
    {
      gcc_assert (parser->tokens_avail == 1);
      /* Never look past the end of the input or of a pragma.  */
      gcc_assert (parser->tokens[0].type != CPP_EOF);
      gcc_assert (parser->tokens[0].type != CPP_PRAGMA_EOL);
      c_lex_one_token (parser, &parser->tokens[1]);
      parser->tokens_avail = 2;
    }
  return &parser->tokens[1];
}
/* Return true if TOKEN can start a type name,
false otherwise. */
static bool
c_token_starts_typename (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      /* A plain identifier only starts a type name if it was declared
	 as a typedef, an Objective-C class name, or an address
	 space.  */
      switch (token->id_kind)
	{
	case C_ID_ID:
	  return false;
	case C_ID_ADDRSPACE:
	  return true;
	case C_ID_TYPENAME:
	  return true;
	case C_ID_CLASSNAME:
	  gcc_assert (c_dialect_objc ());
	  return true;
	default:
	  gcc_unreachable ();
	}
    case CPP_KEYWORD:
      /* Type-specifier and type-qualifier keywords.  Storage classes
	 (static, extern, ...) are deliberately absent: they start
	 declaration specifiers but not a type name.  */
      switch (token->keyword)
	{
	case RID_UNSIGNED:
	case RID_LONG:
	case RID_INT128:
	case RID_SHORT:
	case RID_SIGNED:
	case RID_COMPLEX:
	case RID_INT:
	case RID_CHAR:
	case RID_FLOAT:
	case RID_DOUBLE:
	case RID_VOID:
	case RID_DFLOAT32:
	case RID_DFLOAT64:
	case RID_DFLOAT128:
	case RID_BOOL:
	case RID_ENUM:
	case RID_STRUCT:
	case RID_UNION:
	case RID_TYPEOF:
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	case RID_ATTRIBUTE:
	case RID_FRACT:
	case RID_ACCUM:
	case RID_SAT:
	  return true;
	default:
	  return false;
	}
    case CPP_LESS:
      /* '<' can start a type name only in Objective-C.  */
      if (c_dialect_objc ())
	return true;
      return false;
    default:
      return false;
    }
}
/* How aggressively to treat an unknown identifier as a type name when
   looking ahead (used by c_parser_next_tokens_start_typename).  */
enum c_lookahead_kind {
  /* Always treat unknown identifiers as typenames.  */
  cla_prefer_type,
  /* Could be parsing a nonabstract declarator.  Only treat an identifier
     as a typename if followed by another identifier or a star.  */
  cla_nonabstract_decl,
  /* Never treat identifiers as typenames.  */
  cla_prefer_id
};
/* Return true if the next token from PARSER can start a type name,
false otherwise. LA specifies how to do lookahead in order to
detect unknown type names. If unsure, pick CLA_PREFER_ID. */
/* Return true if the next token from PARSER can start a type name,
   false otherwise.  LA specifies how to do lookahead in order to
   detect unknown type names.  If unsure, pick CLA_PREFER_ID.  */
static inline bool
c_parser_next_tokens_start_typename (c_parser *parser, enum c_lookahead_kind la)
{
  c_token *token = c_parser_peek_token (parser);
  if (c_token_starts_typename (token))
    return true;

  /* Try a bit harder to detect an unknown typename, via guard clauses
     mirroring the lookahead policy in LA.  */
  if (la == cla_prefer_id)
    return false;
  if (token->type != CPP_NAME || token->id_kind != C_ID_ID)
    return false;
  /* Do not try too hard when we could have "object in array".  */
  if (parser->objc_could_be_foreach_context)
    return false;
  if (la != cla_prefer_type
      && c_parser_peek_2nd_token (parser)->type != CPP_NAME
      && c_parser_peek_2nd_token (parser)->type != CPP_MULT)
    return false;
  /* Only unknown identifiers qualify.  */
  return !lookup_name (token->value);
}
/* Return true if TOKEN is a type qualifier, false otherwise. */
/* Return true if TOKEN is a type qualifier, false otherwise.  TOKEN
   must be a name, keyword or '<'; anything else is a caller bug.  */
static bool
c_token_is_qualifier (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      /* Of the identifier kinds, only address spaces qualify.  */
      return token->id_kind == C_ID_ADDRSPACE;
    case CPP_KEYWORD:
      switch (token->keyword)
	{
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	case RID_ATTRIBUTE:
	  return true;
	default:
	  return false;
	}
    case CPP_LESS:
      return false;
    default:
      gcc_unreachable ();
    }
}
/* Return true if the next token from PARSER is a type qualifier,
false otherwise. */
/* Return true if the next token from PARSER is a type qualifier,
   false otherwise.  */
static inline bool
c_parser_next_token_is_qualifier (c_parser *parser)
{
  return c_token_is_qualifier (c_parser_peek_token (parser));
}
/* Return true if TOKEN can start declaration specifiers, false
otherwise. */
static bool
c_token_starts_declspecs (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      /* Same identifier classification as c_token_starts_typename.  */
      switch (token->id_kind)
	{
	case C_ID_ID:
	  return false;
	case C_ID_ADDRSPACE:
	  return true;
	case C_ID_TYPENAME:
	  return true;
	case C_ID_CLASSNAME:
	  gcc_assert (c_dialect_objc ());
	  return true;
	default:
	  gcc_unreachable ();
	}
    case CPP_KEYWORD:
      /* Unlike c_token_starts_typename, storage classes and function
	 specifiers (static .. thread) are included here.  */
      switch (token->keyword)
	{
	case RID_STATIC:
	case RID_EXTERN:
	case RID_REGISTER:
	case RID_TYPEDEF:
	case RID_INLINE:
	case RID_AUTO:
	case RID_THREAD:
	case RID_UNSIGNED:
	case RID_LONG:
	case RID_INT128:
	case RID_SHORT:
	case RID_SIGNED:
	case RID_COMPLEX:
	case RID_INT:
	case RID_CHAR:
	case RID_FLOAT:
	case RID_DOUBLE:
	case RID_VOID:
	case RID_DFLOAT32:
	case RID_DFLOAT64:
	case RID_DFLOAT128:
	case RID_BOOL:
	case RID_ENUM:
	case RID_STRUCT:
	case RID_UNION:
	case RID_TYPEOF:
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	case RID_ATTRIBUTE:
	case RID_FRACT:
	case RID_ACCUM:
	case RID_SAT:
	  return true;
	default:
	  return false;
	}
    case CPP_LESS:
      /* '<' can start declspecs only in Objective-C.  */
      if (c_dialect_objc ())
	return true;
      return false;
    default:
      return false;
    }
}
/* Return true if TOKEN can start declaration specifiers or a static
assertion, false otherwise. */
/* Return true if TOKEN can start declaration specifiers or a static
   assertion, false otherwise.  */
static bool
c_token_starts_declaration (c_token *token)
{
  return (c_token_starts_declspecs (token)
	  || token->keyword == RID_STATIC_ASSERT);
}
/* Return true if the next token from PARSER can start declaration
specifiers, false otherwise. */
/* Return true if the next token from PARSER can start declaration
   specifiers, false otherwise.  */
static inline bool
c_parser_next_token_starts_declspecs (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);

  /* In Objective-C, a classname normally starts a declspecs unless it
     is immediately followed by a dot.  In that case, it is the
     Objective-C 2.0 "dot-syntax" for class objects, ie, calls the
     setter/getter on the class.  c_token_starts_declspecs() can't
     differentiate between the two cases because it only checks the
     current token, so we have a special check here.  */
  bool objc_classname = (c_dialect_objc ()
			 && token->type == CPP_NAME
			 && token->id_kind == C_ID_CLASSNAME);
  if (objc_classname
      && c_parser_peek_2nd_token (parser)->type == CPP_DOT)
    return false;

  return c_token_starts_declspecs (token);
}
/* Return true if the next tokens from PARSER can start declaration
specifiers or a static assertion, false otherwise. */
/* Return true if the next tokens from PARSER can start declaration
   specifiers or a static assertion, false otherwise.  */
static inline bool
c_parser_next_tokens_start_declaration (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);

  /* An Objective-C class name followed by '.' is the 2.0 dot-syntax,
     not a declaration.  */
  if (c_dialect_objc ()
      && token->type == CPP_NAME
      && token->id_kind == C_ID_CLASSNAME
      && c_parser_peek_2nd_token (parser)->type == CPP_DOT)
    return false;

  /* "identifier :" is a label, and labels do not start declarations.  */
  if (token->type == CPP_NAME
      && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
    return false;

  return (c_token_starts_declaration (token)
	  || c_parser_next_tokens_start_typename (parser,
						  cla_nonabstract_decl));
}
/* Consume the next token from PARSER. */
/* Consume the next token from PARSER, shifting the second look-ahead
   token down when one is buffered.  */
static void
c_parser_consume_token (c_parser *parser)
{
  gcc_assert (parser->tokens_avail >= 1);
  gcc_assert (parser->tokens[0].type != CPP_EOF);
  gcc_assert (!parser->in_pragma || parser->tokens[0].type != CPP_PRAGMA_EOL);
  gcc_assert (parser->error || parser->tokens[0].type != CPP_PRAGMA);
  parser->tokens_avail--;
  if (parser->tokens_avail == 1)
    parser->tokens[0] = parser->tokens[1];
}
/* Expect the current token to be a #pragma.  Consume it and remember
   that we've begun parsing a pragma.  */
static void
c_parser_consume_pragma (c_parser *parser)
{
  /* Pragmas do not nest.  */
  gcc_assert (!parser->in_pragma);
  gcc_assert (parser->tokens_avail >= 1);
  gcc_assert (parser->tokens[0].type == CPP_PRAGMA);
  /* Shift the second lookahead token (if any) into the first slot,
     as in c_parser_consume_token.  */
  if (parser->tokens_avail == 2)
    parser->tokens[0] = parser->tokens[1];
  parser->tokens_avail--;
  /* Record that we are inside a pragma until its CPP_PRAGMA_EOL is
     consumed (see c_parser_skip_to_pragma_eol).  */
  parser->in_pragma = true;
}
/* Update the globals input_location and in_system_header from
   TOKEN.  */
static inline void
c_parser_set_source_position_from_token (c_token *token)
{
  /* EOF carries no usable source position; leave the globals alone.  */
  if (token->type == CPP_EOF)
    return;
  input_location = token->location;
}
/* Issue a diagnostic of the form
      FILE:LINE: MESSAGE before TOKEN
   where TOKEN is the next token in the input stream of PARSER.
   MESSAGE (specified by the caller) is usually of the form "expected
   OTHER-TOKEN".

   Do not issue a diagnostic if still recovering from an error.

   ??? This is taken from the C++ parser, but building up messages in
   this way is not i18n-friendly and some other approach should be
   used.  */
static void
c_parser_error (c_parser *parser, const char *gmsgid)
{
  c_token *token = c_parser_peek_token (parser);
  /* Stay quiet while already recovering from an error.  */
  if (parser->error)
    return;
  parser->error = true;
  if (!gmsgid)
    return;
  /* This diagnostic makes more sense if it is tagged to the line of
     the token we just peeked at.  */
  c_parser_set_source_position_from_token (token);
  /* c_parse_error does not understand CPP_KEYWORD, so keywords are
     reported as plain identifiers.  */
  enum cpp_ttype reported_type
    = (token->type == CPP_KEYWORD ? CPP_NAME : token->type);
  /* ??? The C parser does not save the cpp flags of a token, so pass
     0 here; some tokens will be shown with their canonical rather
     than their source spelling.  */
  c_parse_error (gmsgid, reported_type, token->value, /*flags=*/0);
}
/* If the next token is of the indicated TYPE, consume it.  Otherwise,
   issue the error MSGID.  If MSGID is NULL then a message has already
   been produced and no message will be produced this time.  Returns
   true if found, false otherwise.  */
static bool
c_parser_require (c_parser *parser,
		  enum cpp_ttype type,
		  const char *msgid)
{
  if (!c_parser_next_token_is (parser, type))
    {
      c_parser_error (parser, msgid);
      return false;
    }
  c_parser_consume_token (parser);
  return true;
}
/* If the next token is the indicated keyword, consume it.  Otherwise,
   issue the error MSGID.  Returns true if found, false otherwise.  */
static bool
c_parser_require_keyword (c_parser *parser,
			  enum rid keyword,
			  const char *msgid)
{
  if (!c_parser_next_token_is_keyword (parser, keyword))
    {
      c_parser_error (parser, msgid);
      return false;
    }
  c_parser_consume_token (parser);
  return true;
}
/* Like c_parser_require, except that tokens will be skipped until the
   desired token is found.  An error message is still produced if the
   next token is not as expected.  If MSGID is NULL then a message has
   already been produced and no message will be produced this
   time.  */
static void
c_parser_skip_until_found (c_parser *parser,
			   enum cpp_ttype type,
			   const char *msgid)
{
  unsigned nesting_depth = 0;
  if (c_parser_require (parser, type, msgid))
    return;
  /* Skip tokens until the desired token is found.  */
  while (true)
    {
      /* Peek at the next token.  */
      c_token *token = c_parser_peek_token (parser);
      /* If we've reached the token we want at nesting depth zero,
	 consume it and stop.  */
      if (token->type == type && !nesting_depth)
	{
	  c_parser_consume_token (parser);
	  break;
	}
      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF)
	return;
      /* Never skip past the end of a pragma we are inside.  */
      if (token->type == CPP_PRAGMA_EOL && parser->in_pragma)
	return;
      if (token->type == CPP_OPEN_BRACE
	  || token->type == CPP_OPEN_PAREN
	  || token->type == CPP_OPEN_SQUARE)
	++nesting_depth;
      else if (token->type == CPP_CLOSE_BRACE
	       || token->type == CPP_CLOSE_PAREN
	       || token->type == CPP_CLOSE_SQUARE)
	{
	  /* On an unmatched closing delimiter (depth already zero),
	     stop WITHOUT consuming it, leaving it for the caller;
	     nesting_depth wraps around but is never read again.  */
	  if (nesting_depth-- == 0)
	    break;
	}
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
  /* Recovery is complete; clear the error flag so parsing resumes.  */
  parser->error = false;
}
/* Skip tokens until the end of a parameter is found, but do not
   consume the comma, semicolon or closing delimiter.  */
static void
c_parser_skip_to_end_of_parameter (c_parser *parser)
{
  unsigned nesting_depth = 0;
  while (true)
    {
      c_token *token = c_parser_peek_token (parser);
      /* A ',' or ';' at nesting depth zero ends the parameter; leave
	 it unconsumed for the caller.  */
      if ((token->type == CPP_COMMA || token->type == CPP_SEMICOLON)
	  && !nesting_depth)
	break;
      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF)
	return;
      /* Never skip past the end of a pragma we are inside.  */
      if (token->type == CPP_PRAGMA_EOL && parser->in_pragma)
	return;
      if (token->type == CPP_OPEN_BRACE
	  || token->type == CPP_OPEN_PAREN
	  || token->type == CPP_OPEN_SQUARE)
	++nesting_depth;
      else if (token->type == CPP_CLOSE_BRACE
	       || token->type == CPP_CLOSE_PAREN
	       || token->type == CPP_CLOSE_SQUARE)
	{
	  /* Stop (without consuming) on an unmatched closing
	     delimiter; the wrapped nesting_depth is never read.  */
	  if (nesting_depth-- == 0)
	    break;
	}
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
  /* Recovery is complete; clear the error flag so parsing resumes.  */
  parser->error = false;
}
/* Expect to be at the end of the pragma directive and consume an
   end of line marker.  */
static void
c_parser_skip_to_pragma_eol (c_parser *parser)
{
  gcc_assert (parser->in_pragma);
  parser->in_pragma = false;
  /* If the next token is not the expected CPP_PRAGMA_EOL, there is
     junk at the end of the pragma: discard tokens up to and including
     the end-of-line marker (or until EOF).  */
  if (!c_parser_require (parser, CPP_PRAGMA_EOL, "expected end of line"))
    while (true)
      {
	c_token *token = c_parser_peek_token (parser);
	if (token->type == CPP_EOF)
	  break;
	if (token->type == CPP_PRAGMA_EOL)
	  {
	    c_parser_consume_token (parser);
	    break;
	  }
	c_parser_consume_token (parser);
      }
  /* The pragma is fully consumed; resume normal parsing.  */
  parser->error = false;
}
/* Skip tokens until we have consumed an entire block, or until we
   have consumed a non-nested ';'.  */
static void
c_parser_skip_to_end_of_block_or_statement (c_parser *parser)
{
  unsigned nesting_depth = 0;
  bool save_error = parser->error;
  while (true)
    {
      c_token *token;
      /* Peek at the next token.  */
      token = c_parser_peek_token (parser);
      switch (token->type)
	{
	case CPP_EOF:
	  /* Out of tokens: give up without clearing parser->error.  */
	  return;
	case CPP_PRAGMA_EOL:
	  /* Never skip past the end of a pragma we are inside.  */
	  if (parser->in_pragma)
	    return;
	  break;
	case CPP_SEMICOLON:
	  /* If the next token is a ';', we have reached the
	     end of the statement.  */
	  if (!nesting_depth)
	    {
	      /* Consume the ';'.  */
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;
	case CPP_CLOSE_BRACE:
	  /* If the next token is a non-nested '}', then we have
	     reached the end of the current block.  */
	  if (nesting_depth == 0 || --nesting_depth == 0)
	    {
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;
	case CPP_OPEN_BRACE:
	  /* If it the next token is a '{', then we are entering a new
	     block.  Consume the entire block.  */
	  ++nesting_depth;
	  break;
	case CPP_PRAGMA:
	  /* If we see a pragma, consume the whole thing at once.  We
	     have some safeguards against consuming pragmas willy-nilly.
	     Normally, we'd expect to be here with parser->error set,
	     which disables these safeguards.  But it's possible to get
	     here for secondary error recovery, after parser->error has
	     been cleared.  */
	  c_parser_consume_pragma (parser);
	  c_parser_skip_to_pragma_eol (parser);
	  /* c_parser_skip_to_pragma_eol cleared parser->error; put it
	     back so the pragma safeguards keep working.  */
	  parser->error = save_error;
	  continue;
	default:
	  break;
	}
      c_parser_consume_token (parser);
    }
 finished:
  /* Recovery is complete; clear the error flag so parsing resumes.  */
  parser->error = false;
}
/* CPP's options (initialized by c-opts.c). */
extern cpp_options *cpp_opts;
/* Save the warning flags which are controlled by __extension__.  */
static inline int
disable_extension_diagnostics (void)
{
  /* Pack the current flag values into one int, one bit per flag:
       bit 0: pedantic
       bit 1: warn_pointer_arith
       bit 2: warn_traditional
       bit 3: flag_iso
       bit 4: warn_long_long
       bit 5: warn_cxx_compat
     This layout must match restore_extension_diagnostics.  */
  int ret = (pedantic
	     | (warn_pointer_arith << 1)
	     | (warn_traditional << 2)
	     | (flag_iso << 3)
	     | (warn_long_long << 4)
	     | (warn_cxx_compat << 5));
  /* Clear every flag, keeping the preprocessor-side copies (in
     cpp_opts) in sync where they exist.  */
  cpp_opts->cpp_pedantic = pedantic = 0;
  warn_pointer_arith = 0;
  cpp_opts->cpp_warn_traditional = warn_traditional = 0;
  flag_iso = 0;
  cpp_opts->cpp_warn_long_long = warn_long_long = 0;
  warn_cxx_compat = 0;
  return ret;
}
/* Restore the warning flags which are controlled by __extension__.
   FLAGS is the return value from disable_extension_diagnostics.  */
static inline void
restore_extension_diagnostics (int flags)
{
  /* Unpack the bits in the same layout disable_extension_diagnostics
     used, again keeping the cpp_opts copies in sync.  */
  cpp_opts->cpp_pedantic = pedantic = flags & 1;
  warn_pointer_arith = (flags >> 1) & 1;
  cpp_opts->cpp_warn_traditional = warn_traditional = (flags >> 2) & 1;
  flag_iso = (flags >> 3) & 1;
  cpp_opts->cpp_warn_long_long = warn_long_long = (flags >> 4) & 1;
  warn_cxx_compat = (flags >> 5) & 1;
}
/* Possible kinds of declarator to parse.  */
typedef enum c_dtr_syn {
  /* A normal declarator with an identifier.  */
  C_DTR_NORMAL,
  /* An abstract declarator (maybe empty).  */
  C_DTR_ABSTRACT,
  /* A parameter declarator: may be either, but after a type name does
     not redeclare a typedef name as an identifier if it can
     alternatively be interpreted as a typedef name; see DR#009,
     applied in C90 TC1, omitted from C99 and reapplied in C99 TC2
     following DR#249.  For example, given a typedef T, "int T" and
     "int *T" are valid parameter declarations redeclaring T, while
     "int (T)" and "int * (T)" and "int (T[])" and "int (T (int))" are
     abstract declarators rather than involving redundant parentheses;
     the same applies with attributes inside the parentheses before
     "T".  */
  C_DTR_PARM
} c_dtr_syn;
static void c_parser_external_declaration (c_parser *);
static void c_parser_asm_definition (c_parser *);
static void c_parser_declaration_or_fndef (c_parser *, bool, bool, bool,
bool, bool, tree *);
static void c_parser_static_assert_declaration_no_semi (c_parser *);
static void c_parser_static_assert_declaration (c_parser *);
static void c_parser_declspecs (c_parser *, struct c_declspecs *, bool, bool,
bool, enum c_lookahead_kind);
static struct c_typespec c_parser_enum_specifier (c_parser *);
static struct c_typespec c_parser_struct_or_union_specifier (c_parser *);
static tree c_parser_struct_declaration (c_parser *);
static struct c_typespec c_parser_typeof_specifier (c_parser *);
static struct c_declarator *c_parser_declarator (c_parser *, bool, c_dtr_syn,
bool *);
static struct c_declarator *c_parser_direct_declarator (c_parser *, bool,
c_dtr_syn, bool *);
static struct c_declarator *c_parser_direct_declarator_inner (c_parser *,
bool,
struct c_declarator *);
static struct c_arg_info *c_parser_parms_declarator (c_parser *, bool, tree);
static struct c_arg_info *c_parser_parms_list_declarator (c_parser *, tree);
static struct c_parm *c_parser_parameter_declaration (c_parser *, tree);
static tree c_parser_simple_asm_expr (c_parser *);
static tree c_parser_attributes (c_parser *);
static struct c_type_name *c_parser_type_name (c_parser *);
static struct c_expr c_parser_initializer (c_parser *);
static struct c_expr c_parser_braced_init (c_parser *, tree, bool);
static void c_parser_initelt (c_parser *, struct obstack *);
static void c_parser_initval (c_parser *, struct c_expr *,
struct obstack *);
static tree c_parser_compound_statement (c_parser *);
static void c_parser_compound_statement_nostart (c_parser *);
static void c_parser_label (c_parser *);
static void c_parser_statement (c_parser *);
static void c_parser_statement_after_labels (c_parser *);
static void c_parser_if_statement (c_parser *);
static void c_parser_switch_statement (c_parser *);
static void c_parser_while_statement (c_parser *);
static void c_parser_do_statement (c_parser *);
static void c_parser_for_statement (c_parser *);
static tree c_parser_asm_statement (c_parser *);
static tree c_parser_asm_operands (c_parser *, bool);
static tree c_parser_asm_goto_operands (c_parser *);
static tree c_parser_asm_clobbers (c_parser *);
static struct c_expr c_parser_expr_no_commas (c_parser *, struct c_expr *);
static struct c_expr c_parser_conditional_expression (c_parser *,
struct c_expr *);
static struct c_expr c_parser_binary_expression (c_parser *, struct c_expr *);
static struct c_expr c_parser_cast_expression (c_parser *, struct c_expr *);
static struct c_expr c_parser_unary_expression (c_parser *);
static struct c_expr c_parser_sizeof_expression (c_parser *);
static struct c_expr c_parser_alignof_expression (c_parser *);
static struct c_expr c_parser_postfix_expression (c_parser *);
static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *,
struct c_type_name *,
location_t);
static struct c_expr c_parser_postfix_expression_after_primary (c_parser *,
location_t loc,
struct c_expr);
static struct c_expr c_parser_expression (c_parser *);
static struct c_expr c_parser_expression_conv (c_parser *);
static VEC(tree,gc) *c_parser_expr_list (c_parser *, bool, bool,
VEC(tree,gc) **);
static void c_parser_omp_construct (c_parser *);
static void c_parser_omp_threadprivate (c_parser *);
static void c_parser_omp_barrier (c_parser *);
static void c_parser_omp_flush (c_parser *);
static void c_parser_omp_taskwait (c_parser *);
/* Contexts in which a #pragma may be parsed; passed to
   c_parser_pragma to select the allowed pragma kinds.  */
enum pragma_context { pragma_external, pragma_stmt, pragma_compound };
static bool c_parser_pragma (c_parser *, enum pragma_context);
/* These Objective-C parser functions are only ever called when
compiling Objective-C. */
static void c_parser_objc_class_definition (c_parser *, tree);
static void c_parser_objc_class_instance_variables (c_parser *);
static void c_parser_objc_class_declaration (c_parser *);
static void c_parser_objc_alias_declaration (c_parser *);
static void c_parser_objc_protocol_definition (c_parser *, tree);
static bool c_parser_objc_method_type (c_parser *);
static void c_parser_objc_method_definition (c_parser *);
static void c_parser_objc_methodprotolist (c_parser *);
static void c_parser_objc_methodproto (c_parser *);
static tree c_parser_objc_method_decl (c_parser *, bool, tree *);
static tree c_parser_objc_type_name (c_parser *);
static tree c_parser_objc_protocol_refs (c_parser *);
static void c_parser_objc_try_catch_finally_statement (c_parser *);
static void c_parser_objc_synchronized_statement (c_parser *);
static tree c_parser_objc_selector (c_parser *);
static tree c_parser_objc_selector_arg (c_parser *);
static tree c_parser_objc_receiver (c_parser *);
static tree c_parser_objc_message_args (c_parser *);
static tree c_parser_objc_keywordexpr (c_parser *);
static void c_parser_objc_at_property_declaration (c_parser *);
static void c_parser_objc_at_synthesize_declaration (c_parser *);
static void c_parser_objc_at_dynamic_declaration (c_parser *);
static bool c_parser_objc_diagnose_bad_element_prefix
(c_parser *, struct c_declspecs *);
/* Parse a translation unit (C90 6.7, C99 6.9).

   translation-unit:
     external-declarations

   external-declarations:
     external-declaration
     external-declarations external-declaration

   GNU extensions:

   translation-unit:
     empty
*/
static void
c_parser_translation_unit (c_parser *parser)
{
  /* An empty translation unit is a GNU extension; diagnose it under
     -pedantic and return.  */
  if (c_parser_next_token_is (parser, CPP_EOF))
    {
      pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
	       "ISO C forbids an empty translation unit");
      return;
    }

  void *obstack_position = obstack_alloc (&parser_obstack, 0);
  mark_valid_location_for_stdc_pragma (false);
  /* Parse external declarations until EOF, collecting garbage and
     releasing per-declaration obstack storage after each one.  */
  do
    {
      ggc_collect ();
      c_parser_external_declaration (parser);
      obstack_free (&parser_obstack, obstack_position);
    }
  while (c_parser_next_token_is_not (parser, CPP_EOF));
}
/* Parse an external declaration (C90 6.7, C99 6.9).

   external-declaration:
     function-definition
     declaration

   GNU extensions:

   external-declaration:
     asm-definition
     ;
     __extension__ external-declaration

   Objective-C:

   external-declaration:
     objc-class-definition
     objc-class-declaration
     objc-alias-declaration
     objc-protocol-definition
     objc-method-definition
     @end
*/
static void
c_parser_external_declaration (c_parser *parser)
{
  int ext;
  /* Dispatch on the first token.  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_EXTENSION:
	  /* __extension__ external-declaration: suppress the
	     extension diagnostics around the recursive parse.  */
	  ext = disable_extension_diagnostics ();
	  c_parser_consume_token (parser);
	  c_parser_external_declaration (parser);
	  restore_extension_diagnostics (ext);
	  break;
	case RID_ASM:
	  c_parser_asm_definition (parser);
	  break;
	case RID_AT_INTERFACE:
	case RID_AT_IMPLEMENTATION:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_class_definition (parser, NULL_TREE);
	  break;
	case RID_AT_CLASS:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_class_declaration (parser);
	  break;
	case RID_AT_ALIAS:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_alias_declaration (parser);
	  break;
	case RID_AT_PROTOCOL:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_protocol_definition (parser, NULL_TREE);
	  break;
	case RID_AT_PROPERTY:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_at_property_declaration (parser);
	  break;
	case RID_AT_SYNTHESIZE:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_at_synthesize_declaration (parser);
	  break;
	case RID_AT_DYNAMIC:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_at_dynamic_declaration (parser);
	  break;
	case RID_AT_END:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  objc_finish_implementation ();
	  break;
	default:
	  /* Any other keyword starts a declaration or function
	     definition.  */
	  goto decl_or_fndef;
	}
      break;
    case CPP_SEMICOLON:
      /* A stray ';' at file scope is a GNU extension.  */
      pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
	       "ISO C does not allow extra %<;%> outside of a function");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      mark_valid_location_for_stdc_pragma (true);
      c_parser_pragma (parser, pragma_external);
      mark_valid_location_for_stdc_pragma (false);
      break;
    case CPP_PLUS:
    case CPP_MINUS:
      /* '+' or '-' at file scope starts an Objective-C method
	 definition.  */
      if (c_dialect_objc ())
	{
	  c_parser_objc_method_definition (parser);
	  break;
	}
      /* Else fall through, and yield a syntax error trying to parse
	 as a declaration or function definition.  */
    default:
    decl_or_fndef:
      /* A declaration or a function definition (or, in Objective-C,
	 an @interface or @protocol with prefix attributes).  We can
	 only tell which after parsing the declaration specifiers, if
	 any, and the first declarator.  */
      c_parser_declaration_or_fndef (parser, true, true, true, false, true, NULL);
      break;
    }
}
/* Parse a declaration or function definition (C90 6.5, 6.7.1, C99
6.7, 6.9.1). If FNDEF_OK is true, a function definition is
accepted; otherwise (old-style parameter declarations) only other
declarations are accepted. If STATIC_ASSERT_OK is true, a static
assertion is accepted; otherwise (old-style parameter declarations)
it is not. If NESTED is true, we are inside a function or parsing
old-style parameter declarations; any functions encountered are
nested functions and declaration specifiers are required; otherwise
we are at top level and functions are normal functions and
declaration specifiers may be optional. If EMPTY_OK is true, empty
declarations are OK (subject to all other constraints); otherwise
(old-style parameter declarations) they are diagnosed. If
START_ATTR_OK is true, the declaration specifiers may start with
attributes; otherwise they may not.
OBJC_FOREACH_OBJECT_DECLARATION can be used to get back the parsed
declaration when parsing an Objective-C foreach statement.
declaration:
declaration-specifiers init-declarator-list[opt] ;
static_assert-declaration
function-definition:
declaration-specifiers[opt] declarator declaration-list[opt]
compound-statement
declaration-list:
declaration
declaration-list declaration
init-declarator-list:
init-declarator
init-declarator-list , init-declarator
init-declarator:
declarator simple-asm-expr[opt] attributes[opt]
declarator simple-asm-expr[opt] attributes[opt] = initializer
GNU extensions:
nested-function-definition:
declaration-specifiers declarator declaration-list[opt]
compound-statement
Objective-C:
attributes objc-class-definition
attributes objc-category-definition
attributes objc-protocol-definition
The simple-asm-expr and attributes are GNU extensions.
This function does not handle __extension__; that is handled in its
callers. ??? Following the old parser, __extension__ may start
external declarations, declarations in functions and declarations
at the start of "for" loops, but not old-style parameter
declarations.
C99 requires declaration specifiers in a function definition; the
absence is diagnosed through the diagnosis of implicit int. In GNU
C we also allow but diagnose declarations without declaration
specifiers, but only at top level (elsewhere they conflict with
other syntax).
In Objective-C, declarations of the looping variable in a foreach
statement are exceptionally terminated by 'in' (for example, 'for
(NSObject *object in array) { ... }').
OpenMP:
declaration:
threadprivate-directive */
static void
c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok,
			       bool static_assert_ok, bool empty_ok,
			       bool nested, bool start_attr_ok,
			       tree *objc_foreach_object_declaration)
{
  struct c_declspecs *specs;
  tree prefix_attrs;
  tree all_prefix_attrs;
  bool diagnosed_no_specs = false;
  location_t here = c_parser_peek_token (parser)->location;
  /* A static assertion counts as a declaration where allowed.  */
  if (static_assert_ok
      && c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT))
    {
      c_parser_static_assert_declaration (parser);
      return;
    }
  specs = build_null_declspecs ();
  /* Try to detect an unknown type name when we have "A B" or "A *B".  */
  if (c_parser_peek_token (parser)->type == CPP_NAME
      && c_parser_peek_token (parser)->id_kind == C_ID_ID
      && (c_parser_peek_2nd_token (parser)->type == CPP_NAME
	  || c_parser_peek_2nd_token (parser)->type == CPP_MULT)
      && (!nested || !lookup_name (c_parser_peek_token (parser)->value)))
    {
      error_at (here, "unknown type name %qE",
		c_parser_peek_token (parser)->value);
      /* Parse declspecs normally to get a correct pointer type, but avoid
	 a further "fails to be a type name" error.  Refuse nested functions
	 since it is not how the user likely wants us to recover.  */
      c_parser_peek_token (parser)->type = CPP_KEYWORD;
      c_parser_peek_token (parser)->keyword = RID_VOID;
      c_parser_peek_token (parser)->value = error_mark_node;
      fndef_ok = !nested;
    }
  c_parser_declspecs (parser, specs, true, true, start_attr_ok, cla_nonabstract_decl);
  if (parser->error)
    {
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  /* Nested declarations (inside a function or old-style parameter
     list) require declaration specifiers.  */
  if (nested && !specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected declaration specifiers");
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  finish_declspecs (specs);
  /* "specs ;" with no declarator: a tag declaration or an empty
     declaration.  */
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      if (empty_ok)
	shadow_tag (specs);
      else
	{
	  shadow_tag_warned (specs, 1);
	  pedwarn (here, 0, "empty declaration");
	}
      c_parser_consume_token (parser);
      return;
    }
  /* Provide better error recovery.  Note that a type name here is usually
     better diagnosed as a redeclaration.  */
  if (empty_ok
      && specs->typespec_kind == ctsk_tagdef
      && c_parser_next_token_starts_declspecs (parser)
      && !c_parser_next_token_is (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected %<;%>, identifier or %<(%>");
      parser->error = false;
      shadow_tag_warned (specs, 1);
      return;
    }
  else if (c_dialect_objc ())
    {
      /* Prefix attributes are an error on method decls.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_PLUS:
	case CPP_MINUS:
	  if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
	    return;
	  if (specs->attrs)
	    {
	      warning_at (c_parser_peek_token (parser)->location,
			  OPT_Wattributes,
	       		  "prefix attributes are ignored for methods");
	      specs->attrs = NULL_TREE;
	    }
	  if (fndef_ok)
	    c_parser_objc_method_definition (parser);
	  else
	    c_parser_objc_methodproto (parser);
	  return;
	  break;
	default:
	  break;
	}
      /* This is where we parse 'attributes @interface ...',
	 'attributes @implementation ...', 'attributes @protocol ...'
	 (where attributes could be, for example, __attribute__
	 ((deprecated)).
      */
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_AT_INTERFACE:
	  {
	    if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
	      return;
	    c_parser_objc_class_definition (parser, specs->attrs);
	    return;
	  }
	  break;
	case RID_AT_IMPLEMENTATION:
	  {
	    if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
	      return;
	    if (specs->attrs)
	      {
		warning_at (c_parser_peek_token (parser)->location,
			OPT_Wattributes,
			"prefix attributes are ignored for implementations");
		specs->attrs = NULL_TREE;
	      }
	    c_parser_objc_class_definition (parser, NULL_TREE);
	    return;
	  }
	  break;
	case RID_AT_PROTOCOL:
	  {
	    if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
	      return;
	    c_parser_objc_protocol_definition (parser, specs->attrs);
	    return;
	  }
	  break;
	case RID_AT_ALIAS:
	case RID_AT_CLASS:
	case RID_AT_END:
	case RID_AT_PROPERTY:
	  if (specs->attrs)
	    {
	      c_parser_error (parser, "unexpected attribute");
	      specs->attrs = NULL;
	    }
	  break;
	default:
	  break;
	}
    }
  pending_xref_error ();
  prefix_attrs = specs->attrs;
  all_prefix_attrs = prefix_attrs;
  specs->attrs = NULL_TREE;
  /* Loop over the init-declarator list; each iteration parses one
     declarator (or, on the first iteration, possibly a function
     definition).  */
  while (true)
    {
      struct c_declarator *declarator;
      bool dummy = false;
      tree fnbody;
      /* Declaring either one or more declarators (in which case we
	 should diagnose if there were no declaration specifiers) or a
	 function definition (in which case the diagnostic for
	 implicit int suffices).  */
      declarator = c_parser_declarator (parser,
					specs->typespec_kind != ctsk_none,
					C_DTR_NORMAL, &dummy);
      if (declarator == NULL)
	{
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  return;
	}
      /* A token that can follow a declarator in a declaration (and,
	 in Objective-C, RID_IN for foreach) means this is not a
	 function definition.  */
      if (c_parser_next_token_is (parser, CPP_EQ)
	  || c_parser_next_token_is (parser, CPP_COMMA)
	  || c_parser_next_token_is (parser, CPP_SEMICOLON)
	  || c_parser_next_token_is_keyword (parser, RID_ASM)
	  || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)
	  || c_parser_next_token_is_keyword (parser, RID_IN))
	{
	  tree asm_name = NULL_TREE;
	  tree postfix_attrs = NULL_TREE;
	  if (!diagnosed_no_specs && !specs->declspecs_seen_p)
	    {
	      diagnosed_no_specs = true;
	      pedwarn (here, 0, "data definition has no type or storage class");
	    }
	  /* Having seen a data definition, there cannot now be a
	     function definition.  */
	  fndef_ok = false;
	  if (c_parser_next_token_is_keyword (parser, RID_ASM))
	    asm_name = c_parser_simple_asm_expr (parser);
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    postfix_attrs = c_parser_attributes (parser);
	  if (c_parser_next_token_is (parser, CPP_EQ))
	    {
	      tree d;
	      struct c_expr init;
	      location_t init_loc;
	      c_parser_consume_token (parser);
	      /* The declaration of the variable is in effect while
		 its initializer is parsed.  */
	      d = start_decl (declarator, specs, true,
			      chainon (postfix_attrs, all_prefix_attrs));
	      if (!d)
		d = error_mark_node;
	      start_init (d, asm_name, global_bindings_p ());
	      init_loc = c_parser_peek_token (parser)->location;
	      init = c_parser_initializer (parser);
	      finish_init ();
	      if (d != error_mark_node)
		{
		  maybe_warn_string_init (TREE_TYPE (d), init);
		  finish_decl (d, init_loc, init.value,
			       init.original_type, asm_name);
		}
	    }
	  else
	    {
	      /* No initializer.  */
	      tree d = start_decl (declarator, specs, false,
				   chainon (postfix_attrs,
					    all_prefix_attrs));
	      if (d)
		finish_decl (d, UNKNOWN_LOCATION, NULL_TREE,
			     NULL_TREE, asm_name);
	      if (c_parser_next_token_is_keyword (parser, RID_IN))
		{
		  /* Objective-C foreach: report the parsed decl back
		     to the caller.  */
		  if (d)
		    *objc_foreach_object_declaration = d;
		  else
		    *objc_foreach_object_declaration = error_mark_node;
		}
	    }
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      /* Another declarator follows; prefix attributes may be
		 repeated after the comma.  */
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
		all_prefix_attrs = chainon (c_parser_attributes (parser),
					    prefix_attrs);
	      else
		all_prefix_attrs = prefix_attrs;
	      continue;
	    }
	  else if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      c_parser_consume_token (parser);
	      return;
	    }
	  else if (c_parser_next_token_is_keyword (parser, RID_IN))
	    {
	      /* This can only happen in Objective-C: we found the
		 'in' that terminates the declaration inside an
		 Objective-C foreach statement.  Do not consume the
		 token, so that the caller can use it to determine
		 that this indeed is a foreach context.  */
	      return;
	    }
	  else
	    {
	      c_parser_error (parser, "expected %<,%> or %<;%>");
	      c_parser_skip_to_end_of_block_or_statement (parser);
	      return;
	    }
	}
      else if (!fndef_ok)
	{
	  c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, "
			  "%<asm%> or %<__attribute__%>");
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  return;
	}
      /* Function definition (nested or otherwise).  */
      if (nested)
	{
	  pedwarn (here, OPT_pedantic, "ISO C forbids nested functions");
	  c_push_function_context ();
	}
      if (!start_function (specs, declarator, all_prefix_attrs))
	{
	  /* This can appear in many cases looking nothing like a
	     function definition, so we don't give a more specific
	     error suggesting there was one.  */
	  c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, %<asm%> "
			  "or %<__attribute__%>");
	  if (nested)
	    c_pop_function_context ();
	  break;
	}
      /* Parse old-style parameter declarations.  ??? Attributes are
	 not allowed to start declaration specifiers here because of a
	 syntax conflict between a function declaration with attribute
	 suffix and a function definition with an attribute prefix on
	 first old-style parameter declaration.  Following the old
	 parser, they are not accepted on subsequent old-style
	 parameter declarations either.  However, there is no
	 ambiguity after the first declaration, nor indeed on the
	 first as long as we don't allow postfix attributes after a
	 declarator with a nonempty identifier list in a definition;
	 and postfix attributes have never been accepted here in
	 function definitions either.  */
      while (c_parser_next_token_is_not (parser, CPP_EOF)
	     && c_parser_next_token_is_not (parser, CPP_OPEN_BRACE))
	c_parser_declaration_or_fndef (parser, false, false, false,
				       true, false, NULL);
      store_parm_decls ();
      DECL_STRUCT_FUNCTION (current_function_decl)->function_start_locus
	= c_parser_peek_token (parser)->location;
      fnbody = c_parser_compound_statement (parser);
      if (nested)
	{
	  tree decl = current_function_decl;
	  /* Mark nested functions as needing static-chain initially.
	     lower_nested_functions will recompute it but the
	     DECL_STATIC_CHAIN flag is also used before that happens,
	     by initializer_constant_valid_p.  See gcc.dg/nested-fn-2.c.  */
	  DECL_STATIC_CHAIN (decl) = 1;
	  add_stmt (fnbody);
	  finish_function ();
	  c_pop_function_context ();
	  add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl));
	}
      else
	{
	  add_stmt (fnbody);
	  finish_function ();
	}
      break;
    }
}
/* Parse an asm-definition (asm() outside a function body).  This is a
   GNU extension.

   asm-definition:
     simple-asm-expr ;
*/
static void
c_parser_asm_definition (c_parser *parser)
{
  /* Hand the asm string (if one parsed successfully) to the
     callgraph, then insist on the trailing semicolon.  */
  tree str = c_parser_simple_asm_expr (parser);
  if (str != NULL_TREE)
    cgraph_add_asm_node (str);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* Parse a static assertion (C1X N1425 6.7.10).

   static_assert-declaration:
     static_assert-declaration-no-semi ;
*/
static void
c_parser_static_assert_declaration (c_parser *parser)
{
  c_parser_static_assert_declaration_no_semi (parser);
  /* The require call is skipped (short-circuit) if the assertion
     itself already failed to parse.  */
  bool ok = (!parser->error
	     && c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"));
  if (!ok)
    c_parser_skip_to_end_of_block_or_statement (parser);
}
/* Parse a static assertion (C1X N1425 6.7.10), without the trailing
   semicolon.

   static_assert-declaration-no-semi:
     _Static_assert ( constant-expression , string-literal )
*/
static void
c_parser_static_assert_declaration_no_semi (c_parser *parser)
{
  location_t assert_loc, value_loc;
  tree value;
  tree string;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT));
  assert_loc = c_parser_peek_token (parser)->location;
  /* _Static_assert is a C1X feature; pedwarn in older modes.  */
  if (!flag_isoc1x)
    {
      if (flag_isoc99)
	pedwarn (assert_loc, OPT_pedantic,
		 "ISO C99 does not support %<_Static_assert%>");
      else
	pedwarn (assert_loc, OPT_pedantic,
		 "ISO C90 does not support %<_Static_assert%>");
    }
  c_parser_consume_token (parser);
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return;
  value_loc = c_parser_peek_token (parser)->location;
  value = c_parser_expr_no_commas (parser, NULL).value;
  /* The message string must be kept in its untranslated (source)
     form; disable string translation while lexing it.  */
  parser->lex_untranslated_string = true;
  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
    {
      parser->lex_untranslated_string = false;
      return;
    }
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
      string = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      parser->lex_untranslated_string = false;
      break;
    default:
      c_parser_error (parser, "expected string literal");
      parser->lex_untranslated_string = false;
      return;
    }
  c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  /* The controlling expression must be an integer constant
     expression; diagnose non-integer, non-constant-expression and
     non-constant cases separately.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (value)))
    {
      error_at (value_loc, "expression in static assertion is not an integer");
      return;
    }
  if (TREE_CODE (value) != INTEGER_CST)
    {
      value = c_fully_fold (value, false, NULL);
      /* Folding to a constant means it was constant-valued but not a
	 constant expression in the standard's sense.  */
      if (TREE_CODE (value) == INTEGER_CST)
	pedwarn (value_loc, OPT_pedantic, "expression in static assertion "
		 "is not an integer constant expression");
    }
  if (TREE_CODE (value) != INTEGER_CST)
    {
      error_at (value_loc, "expression in static assertion is not constant");
      return;
    }
  constant_expression_warning (value);
  if (integer_zerop (value))
    error_at (assert_loc, "static assertion failed: %E", string);
}
/* Parse some declaration specifiers (possibly none) (C90 6.5, C99
6.7), adding them to SPECS (which may already include some).
Storage class specifiers are accepted iff SCSPEC_OK; type
specifiers are accepted iff TYPESPEC_OK; attributes are accepted at
the start iff START_ATTR_OK.
declaration-specifiers:
storage-class-specifier declaration-specifiers[opt]
type-specifier declaration-specifiers[opt]
type-qualifier declaration-specifiers[opt]
function-specifier declaration-specifiers[opt]
Function specifiers (inline) are from C99, and are currently
handled as storage class specifiers, as is __thread.
C90 6.5.1, C99 6.7.1:
storage-class-specifier:
typedef
extern
static
auto
register
C99 6.7.4:
function-specifier:
inline
C90 6.5.2, C99 6.7.2:
type-specifier:
void
char
short
int
long
float
double
signed
unsigned
_Bool
_Complex
[_Imaginary removed in C99 TC2]
struct-or-union-specifier
enum-specifier
typedef-name
(_Bool and _Complex are new in C99.)
C90 6.5.3, C99 6.7.3:
type-qualifier:
const
restrict
volatile
address-space-qualifier
(restrict is new in C99.)
GNU extensions:
declaration-specifiers:
attributes declaration-specifiers[opt]
type-qualifier:
address-space
address-space:
identifier recognized by the target
storage-class-specifier:
__thread
type-specifier:
typeof-specifier
__int128
_Decimal32
_Decimal64
_Decimal128
_Fract
_Accum
_Sat
(_Fract, _Accum, and _Sat are new from ISO/IEC DTR 18037:
http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1169.pdf)
Objective-C:
type-specifier:
class-name objc-protocol-refs[opt]
typedef-name objc-protocol-refs
objc-protocol-refs
*/
/* Parse declaration specifiers into SPECS until a token is reached
   that cannot continue the specifier list; see the grammar comment
   above for the accepted productions.  LA controls how an identifier
   that could be either a typedef name or a fresh declarator name is
   disambiguated by c_parser_next_tokens_start_typename.  */
static void
c_parser_declspecs (c_parser *parser, struct c_declspecs *specs,
		    bool scspec_ok, bool typespec_ok, bool start_attr_ok,
		    enum c_lookahead_kind la)
{
  /* Whether an attribute is acceptable at the current position; reset
     to true after every specifier that may be followed by one.  */
  bool attrs_ok = start_attr_ok;
  /* Whether a type specifier has been seen, either in SPECS on entry
     or during this call; once true, a further typename ends the
     list rather than being consumed.  */
  bool seen_type = specs->typespec_kind != ctsk_none;
  if (!typespec_ok)
    gcc_assert (la == cla_prefer_id);
  /* Loop for as long as the next token could continue the specifier
     list: a name, a keyword, or (Objective-C) a protocol reference.  */
  while (c_parser_next_token_is (parser, CPP_NAME)
	 || c_parser_next_token_is (parser, CPP_KEYWORD)
	 || (c_dialect_objc () && c_parser_next_token_is (parser, CPP_LESS)))
    {
      struct c_typespec t;
      tree attrs;
      location_t loc = c_parser_peek_token (parser)->location;
      /* If we cannot accept a type, exit if the next token must start
	 one.  Also, if we already have seen a tagged definition,
	 a typename would be an error anyway and likely the user
	 has simply forgotten a semicolon, so we exit.  */
      if ((!typespec_ok || specs->typespec_kind == ctsk_tagdef)
	  && c_parser_next_tokens_start_typename (parser, la)
	  && !c_parser_next_token_is_qualifier (parser))
	break;
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  tree value = c_parser_peek_token (parser)->value;
	  c_id_kind kind = c_parser_peek_token (parser)->id_kind;
	  if (kind == C_ID_ADDRSPACE)
	    {
	      /* Named address space (GNU extension): the RID code for
		 the address space is carried in token->keyword —
		 presumably set by the lexer for target-recognized
		 names; TODO confirm against c-lex.  */
	      addr_space_t as
		= c_parser_peek_token (parser)->keyword - RID_FIRST_ADDR_SPACE;
	      declspecs_add_addrspace (specs, as);
	      c_parser_consume_token (parser);
	      attrs_ok = true;
	      continue;
	    }
	  gcc_assert (!c_parser_next_token_is_qualifier (parser));
	  /* If we cannot accept a type, and the next token must start one,
	     exit.  Do the same if we already have seen a tagged definition,
	     since it would be an error anyway and likely the user has simply
	     forgotten a semicolon.  */
	  if (seen_type || !c_parser_next_tokens_start_typename (parser, la))
	    break;
	  /* Now at an unknown typename (C_ID_ID), a C_ID_TYPENAME or
	     a C_ID_CLASSNAME.  */
	  c_parser_consume_token (parser);
	  seen_type = true;
	  attrs_ok = true;
	  if (kind == C_ID_ID)
	    {
	      /* Unknown name where a type was required: diagnose and
		 continue with error_mark_node so parsing can go on.  */
	      error ("unknown type name %qE", value);
	      t.kind = ctsk_typedef;
	      t.spec = error_mark_node;
	    }
	  else if (kind == C_ID_TYPENAME
		   && (!c_dialect_objc ()
		       || c_parser_next_token_is_not (parser, CPP_LESS)))
	    {
	      t.kind = ctsk_typedef;
	      /* For a typedef name, record the meaning, not the name.
		 In case of 'foo foo, bar;'.  */
	      t.spec = lookup_name (value);
	    }
	  else
	    {
	      /* Objective-C class name, optionally qualified by a
		 protocol reference list ("ClassName <Proto>").  */
	      tree proto = NULL_TREE;
	      gcc_assert (c_dialect_objc ());
	      t.kind = ctsk_objc;
	      if (c_parser_next_token_is (parser, CPP_LESS))
		proto = c_parser_objc_protocol_refs (parser);
	      t.spec = objc_get_protocol_qualified_type (value, proto);
	    }
	  t.expr = NULL_TREE;
	  t.expr_const_operands = true;
	  declspecs_add_type (loc, specs, t);
	  continue;
	}
      if (c_parser_next_token_is (parser, CPP_LESS))
	{
	  /* Make "<SomeProtocol>" equivalent to "id <SomeProtocol>" -
	     nisse@lysator.liu.se.  */
	  tree proto;
	  gcc_assert (c_dialect_objc ());
	  if (!typespec_ok || seen_type)
	    break;
	  proto = c_parser_objc_protocol_refs (parser);
	  t.kind = ctsk_objc;
	  t.spec = objc_get_protocol_qualified_type (NULL_TREE, proto);
	  t.expr = NULL_TREE;
	  t.expr_const_operands = true;
	  declspecs_add_type (loc, specs, t);
	  continue;
	}
      gcc_assert (c_parser_next_token_is (parser, CPP_KEYWORD));
      switch (c_parser_peek_token (parser)->keyword)
	{
	  /* Storage class specifiers, plus inline and __thread which
	     are handled the same way.  */
	case RID_STATIC:
	case RID_EXTERN:
	case RID_REGISTER:
	case RID_TYPEDEF:
	case RID_INLINE:
	case RID_AUTO:
	case RID_THREAD:
	  if (!scspec_ok)
	    goto out;
	  attrs_ok = true;
	  /* TODO: Distinguish between function specifiers (inline)
	     and storage class specifiers, either here or in
	     declspecs_add_scspec.  */
	  declspecs_add_scspec (specs, c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	  break;
	  /* Reserved-word type specifiers; recorded as ctsk_resword
	     and combined by declspecs_add_type.  */
	case RID_UNSIGNED:
	case RID_LONG:
	case RID_INT128:
	case RID_SHORT:
	case RID_SIGNED:
	case RID_COMPLEX:
	case RID_INT:
	case RID_CHAR:
	case RID_FLOAT:
	case RID_DOUBLE:
	case RID_VOID:
	case RID_DFLOAT32:
	case RID_DFLOAT64:
	case RID_DFLOAT128:
	case RID_BOOL:
	case RID_FRACT:
	case RID_ACCUM:
	case RID_SAT:
	  if (!typespec_ok)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  if (c_dialect_objc ())
	    parser->objc_need_raw_identifier = true;
	  t.kind = ctsk_resword;
	  t.spec = c_parser_peek_token (parser)->value;
	  t.expr = NULL_TREE;
	  t.expr_const_operands = true;
	  declspecs_add_type (loc, specs, t);
	  c_parser_consume_token (parser);
	  break;
	case RID_ENUM:
	  /* Tagged type: the sub-parser consumes the whole specifier
	     including any definition body.  */
	  if (!typespec_ok)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  t = c_parser_enum_specifier (parser);
	  declspecs_add_type (loc, specs, t);
	  break;
	case RID_STRUCT:
	case RID_UNION:
	  if (!typespec_ok)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  t = c_parser_struct_or_union_specifier (parser);
	  /* Let plugins see the (possibly just-defined) type.  */
	  invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, t.spec);
	  declspecs_add_type (loc, specs, t);
	  break;
	case RID_TYPEOF:
	  /* ??? The old parser rejected typeof after other type
	     specifiers, but is a syntax error the best way of
	     handling this?  */
	  if (!typespec_ok || seen_type)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  t = c_parser_typeof_specifier (parser);
	  declspecs_add_type (loc, specs, t);
	  break;
	  /* Type qualifiers are always acceptable here.  */
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	  attrs_ok = true;
	  declspecs_add_qual (specs, c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	  break;
	case RID_ATTRIBUTE:
	  if (!attrs_ok)
	    goto out;
	  attrs = c_parser_attributes (parser);
	  declspecs_add_attrs (specs, attrs);
	  break;
	default:
	  /* Any other keyword ends the specifier list.  */
	  goto out;
	}
    }
 out: ;
}
/* Parse an enum specifier (C90 6.5.2.2, C99 6.7.2.2).
enum-specifier:
enum attributes[opt] identifier[opt] { enumerator-list } attributes[opt]
enum attributes[opt] identifier[opt] { enumerator-list , } attributes[opt]
enum attributes[opt] identifier
The form with trailing comma is new in C99. The forms with
attributes are GNU extensions. In GNU C, we accept any expression
without commas in the syntax (assignment expressions, not just
conditional expressions); assignment expressions will be diagnosed
as non-constant.
enumerator-list:
enumerator
enumerator-list , enumerator
enumerator:
enumeration-constant
enumeration-constant = constant-expression
*/
/* Parse an enum specifier; see the grammar comment above.  Returns a
   c_typespec describing either a full definition (ctsk_tagdef) or a
   reference to a (possibly incomplete) tag (ctsk_tagref).  */
static struct c_typespec
c_parser_enum_specifier (c_parser *parser)
{
  struct c_typespec ret;
  tree attrs;
  tree ident = NULL_TREE;
  /* ENUM_LOC is refined as parsing proceeds: first the 'enum'
     keyword, then the token after the attributes, then the tag
     identifier if there is one.  */
  location_t enum_loc;
  location_t ident_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ENUM));
  enum_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  attrs = c_parser_attributes (parser);
  enum_loc = c_parser_peek_token (parser)->location;
  /* Set the location in case we create a decl now.  */
  c_parser_set_source_position_from_token (c_parser_peek_token (parser));
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      ident = c_parser_peek_token (parser)->value;
      ident_loc = c_parser_peek_token (parser)->location;
      enum_loc = ident_loc;
      c_parser_consume_token (parser);
    }
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      /* Parse an enum definition.  */
      struct c_enum_contents the_enum;
      tree type = start_enum (enum_loc, &the_enum, ident);
      tree postfix_attrs;
      /* We chain the enumerators in reverse order, then put them in
	 forward order at the end.  */
      tree values = NULL_TREE;
      c_parser_consume_token (parser);
      while (true)
	{
	  tree enum_id;
	  tree enum_value;
	  tree enum_decl;
	  bool seen_comma;
	  c_token *token;
	  location_t comma_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
	  location_t decl_loc, value_loc;
	  /* Each iteration handles one enumerator and the separator
	     after it; anything else is a parse error recovered by
	     skipping to the closing brace.  */
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      values = error_mark_node;
	      break;
	    }
	  token = c_parser_peek_token (parser);
	  enum_id = token->value;
	  /* Set the location in case we create a decl now.  */
	  c_parser_set_source_position_from_token (token);
	  decl_loc = value_loc = token->location;
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_EQ))
	    {
	      /* Explicit value: "name = constant-expression".  */
	      c_parser_consume_token (parser);
	      value_loc = c_parser_peek_token (parser)->location;
	      enum_value = c_parser_expr_no_commas (parser, NULL).value;
	    }
	  else
	    enum_value = NULL_TREE;
	  enum_decl = build_enumerator (decl_loc, value_loc,
					&the_enum, enum_id, enum_value);
	  /* Prepend; the list is reversed again by nreverse below.  */
	  TREE_CHAIN (enum_decl) = values;
	  values = enum_decl;
	  seen_comma = false;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      comma_loc = c_parser_peek_token (parser)->location;
	      seen_comma = true;
	      c_parser_consume_token (parser);
	    }
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      /* A trailing comma before '}' is valid only in C99.  */
	      if (seen_comma && !flag_isoc99)
		pedwarn (comma_loc, OPT_pedantic, "comma at end of enumerator list");
	      c_parser_consume_token (parser);
	      break;
	    }
	  if (!seen_comma)
	    {
	      /* Neither ',' nor '}' followed the enumerator.  */
	      c_parser_error (parser, "expected %<,%> or %<}%>");
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      values = error_mark_node;
	      break;
	    }
	}
      postfix_attrs = c_parser_attributes (parser);
      ret.spec = finish_enum (type, nreverse (values),
			      chainon (attrs, postfix_attrs));
      ret.kind = ctsk_tagdef;
      ret.expr = NULL_TREE;
      ret.expr_const_operands = true;
      return ret;
    }
  else if (!ident)
    {
      /* Neither a definition body nor a tag name: syntax error.  */
      c_parser_error (parser, "expected %<{%>");
      ret.spec = error_mark_node;
      ret.kind = ctsk_tagref;
      ret.expr = NULL_TREE;
      ret.expr_const_operands = true;
      return ret;
    }
  /* A reference to a previously declared (or forward-referenced)
     enum tag.  */
  ret = parser_xref_tag (ident_loc, ENUMERAL_TYPE, ident);
  /* In ISO C, enumerated types can be referred to only if already
     defined.  */
  if (pedantic && !COMPLETE_TYPE_P (ret.spec))
    {
      gcc_assert (ident);
      pedwarn (enum_loc, OPT_pedantic,
	       "ISO C forbids forward references to %<enum%> types");
    }
  return ret;
}
/* Parse a struct or union specifier (C90 6.5.2.1, C99 6.7.2.1).
struct-or-union-specifier:
struct-or-union attributes[opt] identifier[opt]
{ struct-contents } attributes[opt]
struct-or-union attributes[opt] identifier
struct-contents:
struct-declaration-list
struct-declaration-list:
struct-declaration ;
struct-declaration-list struct-declaration ;
GNU extensions:
struct-contents:
empty
struct-declaration
struct-declaration-list struct-declaration
struct-declaration-list:
struct-declaration-list ;
;
(Note that in the syntax here, unlike that in ISO C, the semicolons
are included here rather than in struct-declaration, in order to
describe the syntax with extra semicolons and missing semicolon at
end.)
Objective-C:
struct-declaration-list:
@defs ( class-name )
(Note this does not include a trailing semicolon, but can be
followed by further declarations, and gets a pedwarn-if-pedantic
when followed by a semicolon.) */
/* Parse a struct or union specifier; see the grammar comment above.
   Returns a c_typespec describing either a full definition
   (ctsk_tagdef) or a tag reference (ctsk_tagref).  */
static struct c_typespec
c_parser_struct_or_union_specifier (c_parser *parser)
{
  struct c_typespec ret;
  tree attrs;
  tree ident = NULL_TREE;
  /* STRUCT_LOC is refined to the tag identifier's location when there
     is one, like ENUM_LOC in c_parser_enum_specifier.  */
  location_t struct_loc;
  location_t ident_loc = UNKNOWN_LOCATION;
  enum tree_code code;
  /* The current token decides between RECORD_TYPE and UNION_TYPE.  */
  switch (c_parser_peek_token (parser)->keyword)
    {
    case RID_STRUCT:
      code = RECORD_TYPE;
      break;
    case RID_UNION:
      code = UNION_TYPE;
      break;
    default:
      gcc_unreachable ();
    }
  struct_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  attrs = c_parser_attributes (parser);
  /* Set the location in case we create a decl now.  */
  c_parser_set_source_position_from_token (c_parser_peek_token (parser));
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      ident = c_parser_peek_token (parser)->value;
      ident_loc = c_parser_peek_token (parser)->location;
      struct_loc = ident_loc;
      c_parser_consume_token (parser);
    }
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      /* Parse a struct or union definition.  Start the scope of the
	 tag before parsing components.  */
      struct c_struct_parse_info *struct_info;
      tree type = start_struct (struct_loc, code, ident, &struct_info);
      tree postfix_attrs;
      /* We chain the components in reverse order, then put them in
	 forward order at the end.  Each struct-declaration may
	 declare multiple components (comma-separated), so we must use
	 chainon to join them, although when parsing each
	 struct-declaration we can use TREE_CHAIN directly.

	 The theory behind all this is that there will be more
	 semicolon separated fields than comma separated fields, and
	 so we'll be minimizing the number of node traversals required
	 by chainon.  */
      tree contents = NULL_TREE;
      c_parser_consume_token (parser);
      /* Handle the Objective-C @defs construct,
	 e.g. foo(sizeof(struct{ @defs(ClassName) }));.  */
      if (c_parser_next_token_is_keyword (parser, RID_AT_DEFS))
	{
	  tree name;
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    goto end_at_defs;
	  if (c_parser_next_token_is (parser, CPP_NAME)
	      && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME)
	    {
	      name = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      c_parser_error (parser, "expected class name");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      goto end_at_defs;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  /* The class's instance variables become the initial
	     contents; reversed to match the reverse chaining used
	     below.  */
	  contents = nreverse (objc_get_class_ivars (name));
	}
    end_at_defs:
      /* Parse the struct-declarations and semicolons.  Problems with
	 semicolons are diagnosed here; empty structures are diagnosed
	 elsewhere.  */
      while (true)
	{
	  tree decls;
	  /* Parse any stray semicolon.  */
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
		       "extra semicolon in struct or union specified");
	      c_parser_consume_token (parser);
	      continue;
	    }
	  /* Stop if at the end of the struct or union contents.  */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      c_parser_consume_token (parser);
	      break;
	    }
	  /* Accept #pragmas at struct scope.  */
	  if (c_parser_next_token_is (parser, CPP_PRAGMA))
	    {
	      c_parser_pragma (parser, pragma_external);
	      continue;
	    }
	  /* Parse some comma-separated declarations, but not the
	     trailing semicolon if any.  */
	  decls = c_parser_struct_declaration (parser);
	  contents = chainon (decls, contents);
	  /* If no semicolon follows, either we have a parse error or
	     are at the end of the struct or union and should
	     pedwarn.  */
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    c_parser_consume_token (parser);
	  else
	    {
	      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
		pedwarn (c_parser_peek_token (parser)->location, 0,
			 "no semicolon at end of struct or union");
	      else if (parser->error
		       || !c_parser_next_token_starts_declspecs (parser))
		{
		  c_parser_error (parser, "expected %<;%>");
		  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
		  break;
		}
	      /* If we come here, we have already emitted an error
		 for an expected `;', identifier or `(', and we also
		 recovered already.  Go on with the next field.  */
	    }
	}
      postfix_attrs = c_parser_attributes (parser);
      ret.spec = finish_struct (struct_loc, type, nreverse (contents),
				chainon (attrs, postfix_attrs), struct_info);
      ret.kind = ctsk_tagdef;
      ret.expr = NULL_TREE;
      ret.expr_const_operands = true;
      return ret;
    }
  else if (!ident)
    {
      /* Neither a definition body nor a tag name: syntax error.  */
      c_parser_error (parser, "expected %<{%>");
      ret.spec = error_mark_node;
      ret.kind = ctsk_tagref;
      ret.expr = NULL_TREE;
      ret.expr_const_operands = true;
      return ret;
    }
  /* A reference to a (possibly incomplete) struct or union tag.  */
  ret = parser_xref_tag (ident_loc, code, ident);
  return ret;
}
/* Parse a struct-declaration (C90 6.5.2.1, C99 6.7.2.1), *without*
the trailing semicolon.
struct-declaration:
specifier-qualifier-list struct-declarator-list
static_assert-declaration-no-semi
specifier-qualifier-list:
type-specifier specifier-qualifier-list[opt]
type-qualifier specifier-qualifier-list[opt]
attributes specifier-qualifier-list[opt]
struct-declarator-list:
struct-declarator
struct-declarator-list , attributes[opt] struct-declarator
struct-declarator:
declarator attributes[opt]
declarator[opt] : constant-expression attributes[opt]
GNU extensions:
struct-declaration:
__extension__ struct-declaration
specifier-qualifier-list
Unlike the ISO C syntax, semicolons are handled elsewhere. The use
of attributes where shown is a GNU extension. In GNU C, we accept
any expression without commas in the syntax (assignment
expressions, not just conditional expressions); assignment
expressions will be diagnosed as non-constant. */
/* Parse one struct-declaration (see the grammar comment above),
   without the trailing semicolon.  Returns the chain of FIELD_DECLs
   declared (linked via DECL_CHAIN, in reverse order of declaration),
   or NULL_TREE on error or for declarations producing no fields.  */
static tree
c_parser_struct_declaration (c_parser *parser)
{
  struct c_declspecs *specs;
  tree prefix_attrs;
  tree all_prefix_attrs;
  tree decls;
  location_t decl_loc;
  if (c_parser_next_token_is_keyword (parser, RID_EXTENSION))
    {
      /* __extension__ struct-declaration: temporarily silence
	 extension diagnostics around a recursive parse.  */
      int ext;
      tree decl;
      ext = disable_extension_diagnostics ();
      c_parser_consume_token (parser);
      decl = c_parser_struct_declaration (parser);
      restore_extension_diagnostics (ext);
      return decl;
    }
  if (c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT))
    {
      /* A static assertion declares no members.  */
      c_parser_static_assert_declaration_no_semi (parser);
      return NULL_TREE;
    }
  specs = build_null_declspecs ();
  decl_loc = c_parser_peek_token (parser)->location;
  /* Member declarations take no storage class specifiers.  */
  c_parser_declspecs (parser, specs, false, true, true, cla_nonabstract_decl);
  if (parser->error)
    return NULL_TREE;
  if (!specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected specifier-qualifier-list");
      return NULL_TREE;
    }
  finish_declspecs (specs);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON)
      || c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      /* Specifiers with no declarators.  */
      tree ret;
      if (specs->typespec_kind == ctsk_none)
	{
	  pedwarn (decl_loc, OPT_pedantic,
		   "ISO C forbids member declarations with no members");
	  shadow_tag_warned (specs, pedantic);
	  ret = NULL_TREE;
	}
      else
	{
	  /* Support for unnamed structs or unions as members of
	     structs or unions (which is [a] useful and [b] supports
	     MS P-SDK).  */
	  tree attrs = NULL;

	  ret = grokfield (c_parser_peek_token (parser)->location,
			   build_id_declarator (NULL_TREE), specs,
			   NULL_TREE, &attrs);
	  if (ret)
	    decl_attributes (&ret, attrs, 0);
	}
      return ret;
    }

  /* Provide better error recovery.  Note that a type name here is valid,
     and will be treated as a field name.  */
  if (specs->typespec_kind == ctsk_tagdef
      && TREE_CODE (specs->type) != ENUMERAL_TYPE
      && c_parser_next_token_starts_declspecs (parser)
      && !c_parser_next_token_is (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected %<;%>, identifier or %<(%>");
      parser->error = false;
      return NULL_TREE;
    }
  pending_xref_error ();
  /* PREFIX_ATTRS are the attributes parsed with the specifiers;
     ALL_PREFIX_ATTRS accumulates the attributes that apply to the
     next declarator (prefix attributes plus any following a comma).  */
  prefix_attrs = specs->attrs;
  all_prefix_attrs = prefix_attrs;
  specs->attrs = NULL_TREE;
  decls = NULL_TREE;
  while (true)
    {
      /* Declaring one or more declarators or un-named bit-fields.  */
      struct c_declarator *declarator;
      bool dummy = false;
      if (c_parser_next_token_is (parser, CPP_COLON))
	/* Un-named bit-field: ": width".  */
	declarator = build_id_declarator (NULL_TREE);
      else
	declarator = c_parser_declarator (parser,
					  specs->typespec_kind != ctsk_none,
					  C_DTR_NORMAL, &dummy);
      if (declarator == NULL)
	{
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  break;
	}
      if (c_parser_next_token_is (parser, CPP_COLON)
	  || c_parser_next_token_is (parser, CPP_COMMA)
	  || c_parser_next_token_is (parser, CPP_SEMICOLON)
	  || c_parser_next_token_is (parser, CPP_CLOSE_BRACE)
	  || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	{
	  tree postfix_attrs = NULL_TREE;
	  tree width = NULL_TREE;
	  tree d;
	  if (c_parser_next_token_is (parser, CPP_COLON))
	    {
	      /* Bit-field width.  */
	      c_parser_consume_token (parser);
	      width = c_parser_expr_no_commas (parser, NULL).value;
	    }
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    postfix_attrs = c_parser_attributes (parser);
	  d = grokfield (c_parser_peek_token (parser)->location,
			 declarator, specs, width, &all_prefix_attrs);
	  decl_attributes (&d, chainon (postfix_attrs,
					all_prefix_attrs), 0);
	  /* Chain in reverse; the caller restores forward order.  */
	  DECL_CHAIN (d) = decls;
	  decls = d;
	  /* Attributes after a comma apply to the next declarator, in
	     addition to the prefix attributes.  */
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    all_prefix_attrs = chainon (c_parser_attributes (parser),
					prefix_attrs);
	  else
	    all_prefix_attrs = prefix_attrs;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else if (c_parser_next_token_is (parser, CPP_SEMICOLON)
		   || c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      /* Semicolon consumed in caller.  */
	      break;
	    }
	  else
	    {
	      c_parser_error (parser, "expected %<,%>, %<;%> or %<}%>");
	      break;
	    }
	}
      else
	{
	  c_parser_error (parser,
			  "expected %<:%>, %<,%>, %<;%>, %<}%> or "
			  "%<__attribute__%>");
	  break;
	}
    }
  return decls;
}
/* Parse a typeof specifier (a GNU extension).
typeof-specifier:
typeof ( expression )
typeof ( type-name )
*/
/* Parse a typeof specifier (a GNU extension); see the grammar comment
   above.  The parenthesized operand may be either a type name or an
   expression.  */
static struct c_typespec
c_parser_typeof_specifier (c_parser *parser)
{
  struct c_typespec result;

  /* Start from an error result; it is replaced only on success.  */
  result.kind = ctsk_typeof;
  result.spec = error_mark_node;
  result.expr = NULL_TREE;
  result.expr_const_operands = true;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_TYPEOF));
  c_parser_consume_token (parser);

  /* The operand of typeof is unevaluated, so suppress evaluation
     warnings while it is parsed.  */
  c_inhibit_evaluation_warnings++;
  in_typeof++;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      c_inhibit_evaluation_warnings--;
      in_typeof--;
      return result;
    }

  if (!c_parser_next_tokens_start_typename (parser, cla_prefer_id))
    {
      /* typeof ( expression ) */
      bool is_vm;
      location_t operand_loc = c_parser_peek_token (parser)->location;
      struct c_expr operand = c_parser_expression (parser);

      c_inhibit_evaluation_warnings--;
      in_typeof--;
      if (TREE_CODE (operand.value) == COMPONENT_REF
	  && DECL_C_BIT_FIELD (TREE_OPERAND (operand.value, 1)))
	error_at (operand_loc, "%<typeof%> applied to a bit-field");
      mark_exp_read (operand.value);
      result.spec = TREE_TYPE (operand.value);
      is_vm = variably_modified_type_p (result.spec, NULL_TREE);
      /* For a variably modified type, keep the folded operand with the
	 type so that it can be evaluated when the type is.  */
      if (is_vm)
	result.expr = c_fully_fold (operand.value, false,
				    &result.expr_const_operands);
      pop_maybe_used (is_vm);
    }
  else
    {
      /* typeof ( type-name ) */
      struct c_type_name *tname = c_parser_type_name (parser);

      c_inhibit_evaluation_warnings--;
      in_typeof--;
      if (tname != NULL)
	{
	  result.spec = groktypename (tname, &result.expr,
				      &result.expr_const_operands);
	  pop_maybe_used (variably_modified_type_p (result.spec, NULL_TREE));
	}
    }

  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return result;
}
/* Parse a declarator, possibly an abstract declarator (C90 6.5.4,
6.5.5, C99 6.7.5, 6.7.6). If TYPE_SEEN_P then a typedef name may
be redeclared; otherwise it may not. KIND indicates which kind of
declarator is wanted. Returns a valid declarator except in the
case of a syntax error in which case NULL is returned. *SEEN_ID is
set to true if an identifier being declared is seen; this is used
to diagnose bad forms of abstract array declarators and to
determine whether an identifier list is syntactically permitted.
declarator:
pointer[opt] direct-declarator
direct-declarator:
identifier
( attributes[opt] declarator )
direct-declarator array-declarator
direct-declarator ( parameter-type-list )
direct-declarator ( identifier-list[opt] )
pointer:
* type-qualifier-list[opt]
* type-qualifier-list[opt] pointer
type-qualifier-list:
type-qualifier
attributes
type-qualifier-list type-qualifier
type-qualifier-list attributes
parameter-type-list:
parameter-list
parameter-list , ...
parameter-list:
parameter-declaration
parameter-list , parameter-declaration
parameter-declaration:
declaration-specifiers declarator attributes[opt]
declaration-specifiers abstract-declarator[opt] attributes[opt]
identifier-list:
identifier
identifier-list , identifier
abstract-declarator:
pointer
pointer[opt] direct-abstract-declarator
direct-abstract-declarator:
( attributes[opt] abstract-declarator )
direct-abstract-declarator[opt] array-declarator
direct-abstract-declarator[opt] ( parameter-type-list[opt] )
GNU extensions:
direct-declarator:
direct-declarator ( parameter-forward-declarations
parameter-type-list[opt] )
direct-abstract-declarator:
direct-abstract-declarator[opt] ( parameter-forward-declarations
parameter-type-list[opt] )
parameter-forward-declarations:
parameter-list ;
parameter-forward-declarations parameter-list ;
The uses of attributes shown above are GNU extensions.
Some forms of array declarator are not included in C99 in the
syntax for abstract declarators; these are disallowed elsewhere.
This may be a defect (DR#289).
This function also accepts an omitted abstract declarator as being
an abstract declarator, although not part of the formal syntax. */
/* Parse a declarator; see the large grammar comment above.  Each
   leading '*' is peeled off here (one level per recursive call); the
   remainder is handled by c_parser_direct_declarator.  Returns NULL
   on a syntax error.  */
static struct c_declarator *
c_parser_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind,
		     bool *seen_id)
{
  struct c_declspecs *ptr_quals;
  struct c_declarator *target;

  /* Without a leading '*' we have a direct declarator, direct
     abstract declarator or nothing (which counts as a direct abstract
     declarator here).  */
  if (c_parser_next_token_is_not (parser, CPP_MULT))
    return c_parser_direct_declarator (parser, type_seen_p, kind, seen_id);

  /* Consume the '*', then any qualifiers and attributes applying to
     the pointer itself, then the declarator being pointed at.  */
  c_parser_consume_token (parser);
  ptr_quals = build_null_declspecs ();
  c_parser_declspecs (parser, ptr_quals, false, false, true, cla_prefer_id);
  target = c_parser_declarator (parser, type_seen_p, kind, seen_id);
  if (target == NULL)
    return NULL;
  return make_pointer_declarator (ptr_quals, target);
}
/* Parse a direct declarator or direct abstract declarator; arguments
as c_parser_declarator. */
/* Parse a direct declarator or direct abstract declarator; arguments
   as c_parser_declarator.  */
static struct c_declarator *
c_parser_direct_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind,
			    bool *seen_id)
{
  /* The direct declarator must start with an identifier (possibly
     omitted) or a parenthesized declarator (possibly abstract).  In
     an ordinary declarator, initial parentheses must start a
     parenthesized declarator.  In an abstract declarator or parameter
     declarator, they could start a parenthesized declarator or a
     parameter list.  To tell which, the open parenthesis and any
     following attributes must be read.  If a declaration specifier
     follows, then it is a parameter list; if the specifier is a
     typedef name, there might be an ambiguity about redeclaring it,
     which is resolved in the direction of treating it as a typedef
     name.  If a close parenthesis follows, it is also an empty
     parameter list, as the syntax does not permit empty abstract
     declarators.  Otherwise, it is a parenthesized declarator (in
     which case the analysis may be repeated inside it, recursively).

     ??? There is an ambiguity in a parameter declaration "int
     (__attribute__((foo)) x)", where x is not a typedef name: it
     could be an abstract declarator for a function, or declare x with
     parentheses.  The proper resolution of this ambiguity needs
     documenting.  At present we follow an accident of the old
     parser's implementation, whereby the first parameter must have
     some declaration specifiers other than just attributes.  Thus as
     a parameter declaration it is treated as a parenthesized
     parameter named x, and as an abstract declarator it is
     rejected.

     ??? Also following the old parser, attributes inside an empty
     parameter list are ignored, making it a list not yielding a
     prototype, rather than giving an error or making it have one
     parameter with implicit type int.

     ??? Also following the old parser, typedef names may be
     redeclared in declarators, but not Objective-C class names.  */

  /* Case 1: the declarator starts with an identifier.  Typedef and
     class names are accepted here only when TYPE_SEEN_P allows them
     to be redeclared.  */
  if (kind != C_DTR_ABSTRACT
      && c_parser_next_token_is (parser, CPP_NAME)
      && ((type_seen_p
	   && (c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME
	       || c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))
	  || c_parser_peek_token (parser)->id_kind == C_ID_ID))
    {
      struct c_declarator *inner
	= build_id_declarator (c_parser_peek_token (parser)->value);
      *seen_id = true;
      inner->id_loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      return c_parser_direct_declarator_inner (parser, *seen_id, inner);
    }

  /* Case 2: an abstract (or parameter) declarator beginning directly
     with an array declarator; the omitted identifier is represented
     by a NULL_TREE id declarator.  */
  if (kind != C_DTR_NORMAL
      && c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
    {
      struct c_declarator *inner = build_id_declarator (NULL_TREE);
      return c_parser_direct_declarator_inner (parser, *seen_id, inner);
    }

  /* Either we are at the end of an abstract declarator, or we have
     parentheses.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      tree attrs;
      struct c_declarator *inner;
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      /* Disambiguate parameter list vs parenthesized declarator as
	 described in the comment above.  */
      if (kind != C_DTR_NORMAL
	  && (c_parser_next_token_starts_declspecs (parser)
	      || c_parser_next_token_is (parser, CPP_CLOSE_PAREN)))
	{
	  struct c_arg_info *args
	    = c_parser_parms_declarator (parser, kind == C_DTR_NORMAL,
					 attrs);
	  if (args == NULL)
	    return NULL;
	  else
	    {
	      inner
		= build_function_declarator (args,
					     build_id_declarator (NULL_TREE));
	      return c_parser_direct_declarator_inner (parser, *seen_id,
						       inner);
	    }
	}
      /* A parenthesized declarator.  */
      inner = c_parser_declarator (parser, type_seen_p, kind, seen_id);
      if (inner != NULL && attrs != NULL)
	inner = build_attrs_declarator (attrs, inner);
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  if (inner == NULL)
	    return NULL;
	  else
	    return c_parser_direct_declarator_inner (parser, *seen_id, inner);
	}
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return NULL;
	}
    }
  else
    {
      /* No identifier and no parentheses: valid only as an omitted
	 abstract declarator.  */
      if (kind == C_DTR_NORMAL)
	{
	  c_parser_error (parser, "expected identifier or %<(%>");
	  return NULL;
	}
      else
	return build_id_declarator (NULL_TREE);
    }
}
/* Parse part of a direct declarator or direct abstract declarator,
given that some (in INNER) has already been parsed; ID_PRESENT is
true if an identifier is present, false for an abstract
declarator. */
/* Parse part of a direct declarator or direct abstract declarator,
   given that some (in INNER) has already been parsed; ID_PRESENT is
   true if an identifier is present, false for an abstract
   declarator.  Recurses to consume any further array declarators or
   parameter lists that follow.  */
static struct c_declarator *
c_parser_direct_declarator_inner (c_parser *parser, bool id_present,
				  struct c_declarator *inner)
{
  /* Parse a sequence of array declarators and parameter lists.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
    {
      location_t brace_loc = c_parser_peek_token (parser)->location;
      struct c_declarator *declarator;
      struct c_declspecs *quals_attrs = build_null_declspecs ();
      bool static_seen;
      bool star_seen;
      tree dimen;
      c_parser_consume_token (parser);
      /* Qualifiers/attributes may appear either before or after a
	 "static" inside the brackets (C99 6.7.5.3); parse whichever
	 order is present.  */
      c_parser_declspecs (parser, quals_attrs, false, false, true, cla_prefer_id);
      static_seen = c_parser_next_token_is_keyword (parser, RID_STATIC);
      if (static_seen)
	c_parser_consume_token (parser);
      if (static_seen && !quals_attrs->declspecs_seen_p)
	c_parser_declspecs (parser, quals_attrs, false, false, true, cla_prefer_id);
      if (!quals_attrs->declspecs_seen_p)
	quals_attrs = NULL;
      /* If "static" is present, there must be an array dimension.
	 Otherwise, there may be a dimension, "*", or no
	 dimension.  */
      if (static_seen)
	{
	  star_seen = false;
	  dimen = c_parser_expr_no_commas (parser, NULL).value;
	}
      else
	{
	  if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
	    {
	      /* "[]": no dimension at all.  */
	      dimen = NULL_TREE;
	      star_seen = false;
	    }
	  else if (c_parser_next_token_is (parser, CPP_MULT))
	    {
	      /* "[*]" (VLA of unspecified size) only if the '*' is
		 immediately followed by ']'; otherwise the '*' begins
		 a dimension expression.  */
	      if (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_SQUARE)
		{
		  dimen = NULL_TREE;
		  star_seen = true;
		  c_parser_consume_token (parser);
		}
	      else
		{
		  star_seen = false;
		  dimen = c_parser_expr_no_commas (parser, NULL).value;
		}
	    }
	  else
	    {
	      star_seen = false;
	      dimen = c_parser_expr_no_commas (parser, NULL).value;
	    }
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
	c_parser_consume_token (parser);
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  return NULL;
	}
      if (dimen)
	mark_exp_read (dimen);
      declarator = build_array_declarator (brace_loc, dimen, quals_attrs,
					   static_seen, star_seen);
      if (declarator == NULL)
	return NULL;
      inner = set_array_declarator_inner (declarator, inner);
      /* Continue with any further suffixes.  */
      return c_parser_direct_declarator_inner (parser, id_present, inner);
    }
  else if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* A parameter list (or identifier list): this is a function
	 declarator.  */
      tree attrs;
      struct c_arg_info *args;
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      args = c_parser_parms_declarator (parser, id_present, attrs);
      if (args == NULL)
	return NULL;
      else
	{
	  inner = build_function_declarator (args, inner);
	  return c_parser_direct_declarator_inner (parser, id_present, inner);
	}
    }
  /* No further suffixes; the declarator is complete.  */
  return inner;
}
/* Parse a parameter list or identifier list, including the closing
parenthesis but not the opening one. ATTRS are the attributes at
the start of the list. ID_LIST_OK is true if an identifier list is
acceptable; such a list must not have attributes at the start. */
/* Parse a parameter list or identifier list, including the closing
   parenthesis but not the opening one.  ATTRS are the attributes at
   the start of the list.  ID_LIST_OK is true if an identifier list is
   acceptable; such a list must not have attributes at the start.  */
static struct c_arg_info *
c_parser_parms_declarator (c_parser *parser, bool id_list_ok, tree attrs)
{
  push_scope ();
  declare_parm_level ();
  /* An old-style identifier list begins with a plain identifier and
     carries no attributes; everything else is a prototype list or an
     empty list.  Peek one token further so that a typo for a type
     name is not mistaken for an identifier list.  */
  if (!id_list_ok
      || attrs
      || !c_parser_next_token_is (parser, CPP_NAME)
      || c_parser_peek_token (parser)->id_kind != C_ID_ID
      || c_parser_peek_2nd_token (parser)->type == CPP_NAME
      || c_parser_peek_2nd_token (parser)->type == CPP_MULT
      || c_parser_peek_2nd_token (parser)->type == CPP_OPEN_PAREN
      || c_parser_peek_2nd_token (parser)->type == CPP_OPEN_SQUARE)
    {
      struct c_arg_info *info
	= c_parser_parms_list_declarator (parser, attrs);
      pop_scope ();
      return info;
    }

  /* Old-style identifier list: gather the comma-separated names,
     appending through a tail pointer to keep them in source order.  */
  {
    tree names = NULL_TREE;
    tree *tail = &names;

    for (;;)
      {
	if (!c_parser_next_token_is (parser, CPP_NAME)
	    || c_parser_peek_token (parser)->id_kind != C_ID_ID)
	  break;
	*tail = build_tree_list (NULL_TREE,
				 c_parser_peek_token (parser)->value);
	tail = &TREE_CHAIN (*tail);
	c_parser_consume_token (parser);
	if (c_parser_next_token_is_not (parser, CPP_COMMA))
	  break;
	c_parser_consume_token (parser);
	if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	  {
	    /* A trailing comma before ')' is a syntax error.  */
	    c_parser_error (parser, "expected identifier");
	    break;
	  }
      }

    if (!c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
      {
	c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				   "expected %<)%>");
	pop_scope ();
	return NULL;
      }

    {
      struct c_arg_info *info = build_arg_info ();
      info->types = names;
      c_parser_consume_token (parser);
      pop_scope ();
      return info;
    }
  }
}
/* Parse a parameter list (possibly empty), including the closing
   parenthesis but not the opening one.  ATTRS are the attributes at
   the start of the list.  Returns NULL if any parameter failed to
   parse or the list could not be completed.  */
static struct c_arg_info *
c_parser_parms_list_declarator (c_parser *parser, tree attrs)
{
  bool bad_parm = false;
  /* ??? Following the old parser, forward parameter declarations may
     use abstract declarators, and if no real parameter declarations
     follow the forward declarations then this is not diagnosed.  Also
     note as above that attributes are ignored as the only contents of
     the parentheses, or as the only contents after forward
     declarations.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      /* "()": empty, unprototyped parameter list.  */
      struct c_arg_info *ret = build_arg_info ();
      c_parser_consume_token (parser);
      return ret;
    }
  if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
    {
      /* "(...)": diagnosed, since ISO C requires a named argument
	 before the ellipsis, but parsing continues.  */
      struct c_arg_info *ret = build_arg_info ();
      /* Suppress -Wold-style-definition for this case.  */
      ret->types = error_mark_node;
      error_at (c_parser_peek_token (parser)->location,
		"ISO C requires a named argument before %<...%>");
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  return ret;
	}
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return NULL;
	}
    }
  /* Nonempty list of parameters, either terminated with semicolon
     (forward declarations; recurse) or with close parenthesis (normal
     function) or with ", ... )" (variadic function).  */
  while (true)
    {
      /* Parse a parameter.  */
      struct c_parm *parm = c_parser_parameter_declaration (parser, attrs);
      /* ATTRS apply to the first parameter only.  */
      attrs = NULL_TREE;
      if (parm == NULL)
	bad_parm = true;
      else
	push_parm_decl (parm);
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  /* Forward parameter declarations: recurse to parse the real
	     parameter list that follows them.  */
	  tree new_attrs;
	  c_parser_consume_token (parser);
	  mark_forward_parm_decls ();
	  new_attrs = c_parser_attributes (parser);
	  return c_parser_parms_list_declarator (parser, new_attrs);
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  if (bad_parm)
	    {
	      /* NOTE(review): get_pending_sizes () appears to be
		 called purely for its side effect on the error paths,
		 presumably to discard state recorded while parsing
		 the bad parameters — confirm against its definition.  */
	      get_pending_sizes ();
	      return NULL;
	    }
	  else
	    return get_parm_info (false);
	}
      if (!c_parser_require (parser, CPP_COMMA,
			     "expected %<;%>, %<,%> or %<)%>"))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  get_pending_sizes ();
	  return NULL;
	}
      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	{
	  /* ", ..." must be immediately followed by ')'.  */
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    {
	      c_parser_consume_token (parser);
	      if (bad_parm)
		{
		  get_pending_sizes ();
		  return NULL;
		}
	      else
		return get_parm_info (true);
	    }
	  else
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	      get_pending_sizes ();
	      return NULL;
	    }
	}
    }
}
/* Parse a parameter declaration.  ATTRS are the attributes at the
   start of the declaration if it is the first parameter.
   Returns NULL on a parse error (the caller marks the whole list
   bad in that case).  */
static struct c_parm *
c_parser_parameter_declaration (c_parser *parser, tree attrs)
{
  struct c_declspecs *specs;
  struct c_declarator *declarator;
  tree prefix_attrs;
  tree postfix_attrs = NULL_TREE;
  bool dummy = false;
  if (!c_parser_next_token_starts_declspecs (parser))
    {
      c_token *token = c_parser_peek_token (parser);
      if (parser->error)
	return NULL;
      c_parser_set_source_position_from_token (token);
      if (c_parser_next_tokens_start_typename (parser, cla_prefer_type))
	{
	  /* The token would start a type name if it were declared as
	     one; diagnose the undeclared type name specifically.  */
	  error ("unknown type name %qE", token->value);
	  parser->error = true;
	}
      /* ??? In some Objective-C cases '...' isn't applicable so there
	 should be a different message.  */
      else
	c_parser_error (parser,
			"expected declaration specifiers or %<...%>");
      c_parser_skip_to_end_of_parameter (parser);
      return NULL;
    }
  specs = build_null_declspecs ();
  if (attrs)
    {
      declspecs_add_attrs (specs, attrs);
      attrs = NULL_TREE;
    }
  c_parser_declspecs (parser, specs, true, true, true, cla_nonabstract_decl);
  finish_declspecs (specs);
  pending_xref_error ();
  /* Detach the prefix attributes from the declspecs so they can be
     chained after any postfix attributes below.  */
  prefix_attrs = specs->attrs;
  specs->attrs = NULL_TREE;
  declarator = c_parser_declarator (parser,
				    specs->typespec_kind != ctsk_none,
				    C_DTR_PARM, &dummy);
  if (declarator == NULL)
    {
      c_parser_skip_until_found (parser, CPP_COMMA, NULL);
      return NULL;
    }
  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    postfix_attrs = c_parser_attributes (parser);
  return build_c_parm (specs, chainon (postfix_attrs, prefix_attrs),
		       declarator);
}
/* Parse a string literal in an asm expression.  It should not be
   translated, and wide string literals are an error although
   permitted by the syntax.  This is a GNU extension.
   asm-string-literal:
     string-literal
   Returns the string, or NULL_TREE if no string literal was found.
   ??? At present, following the old parser, the caller needs to have
   set lex_untranslated_string to 1.  It would be better to follow the
   C++ parser rather than using this kludge.  */
static tree
c_parser_asm_string_literal (c_parser *parser)
{
  /* Narrow string literal: consume it and hand it back unchanged.  */
  if (c_parser_next_token_is (parser, CPP_STRING))
    {
      tree result = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      return result;
    }
  /* Wide string literal: diagnosed; an empty string stands in so the
     caller can continue.  */
  if (c_parser_next_token_is (parser, CPP_WSTRING))
    {
      error_at (c_parser_peek_token (parser)->location,
		"wide string literal in %<asm%>");
      c_parser_consume_token (parser);
      return build_string (1, "");
    }
  c_parser_error (parser, "expected string literal");
  return NULL_TREE;
}
/* Parse a simple asm expression.  This is used in restricted
   contexts, where a full expression with inputs and outputs does not
   make sense.  This is a GNU extension.
   simple-asm-expr:
     asm ( asm-string-literal )
   Returns the string, or NULL_TREE on a parse error.  */
static tree
c_parser_simple_asm_expr (c_parser *parser)
{
  tree string;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
  /* ??? Follow the C++ parser rather than using the
     lex_untranslated_string kludge.  The flag is cleared again on
     every path out of the string literal.  */
  parser->lex_untranslated_string = true;
  c_parser_consume_token (parser);
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      parser->lex_untranslated_string = false;
      return NULL_TREE;
    }
  string = c_parser_asm_string_literal (parser);
  parser->lex_untranslated_string = false;
  if (c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
    return string;
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
  return NULL_TREE;
}
/* Parse (possibly empty) attributes. This is a GNU extension.
attributes:
empty
attributes attribute
attribute:
__attribute__ ( ( attribute-list ) )
attribute-list:
attrib
attribute-list , attrib
attrib:
empty
any-word
any-word ( identifier )
any-word ( identifier , nonempty-expr-list )
any-word ( expr-list )
where the "identifier" must not be declared as a type, and
"any-word" may be any identifier (including one declared as a
type), a reserved word storage class specifier, type specifier or
type qualifier. ??? This still leaves out most reserved keywords
(following the old parser), shouldn't we include them, and why not
allow identifiers declared as types to start the arguments? */
static tree
c_parser_attributes (c_parser *parser)
{
  tree attrs = NULL_TREE;
  /* Each iteration consumes one "__attribute__ ( ( ... ) )" clause;
     the chained result accumulates across clauses.  */
  while (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    {
      /* ??? Follow the C++ parser rather than using the
	 lex_untranslated_string kludge.  */
      parser->lex_untranslated_string = true;
      c_parser_consume_token (parser);
      /* __attribute__ is followed by two open parentheses; require
	 them one at a time.  */
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	{
	  parser->lex_untranslated_string = false;
	  return attrs;
	}
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	{
	  parser->lex_untranslated_string = false;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return attrs;
	}
      /* Parse the attribute list.  */
      while (c_parser_next_token_is (parser, CPP_COMMA)
	     || c_parser_next_token_is (parser, CPP_NAME)
	     || c_parser_next_token_is (parser, CPP_KEYWORD))
	{
	  tree attr, attr_name, attr_args;
	  VEC(tree,gc) *expr_list;
	  /* Empty attribs between commas are permitted.  */
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      c_parser_consume_token (parser);
	      continue;
	    }
	  if (c_parser_next_token_is (parser, CPP_KEYWORD))
	    {
	      /* ??? See comment above about what keywords are
		 accepted here.  Only these reserved words (storage
		 class specifiers, type specifiers and qualifiers) may
		 name an attribute; anything else ends the list.  */
	      bool ok;
	      switch (c_parser_peek_token (parser)->keyword)
		{
		case RID_STATIC:
		case RID_UNSIGNED:
		case RID_LONG:
		case RID_INT128:
		case RID_CONST:
		case RID_EXTERN:
		case RID_REGISTER:
		case RID_TYPEDEF:
		case RID_SHORT:
		case RID_INLINE:
		case RID_VOLATILE:
		case RID_SIGNED:
		case RID_AUTO:
		case RID_RESTRICT:
		case RID_COMPLEX:
		case RID_THREAD:
		case RID_INT:
		case RID_CHAR:
		case RID_FLOAT:
		case RID_DOUBLE:
		case RID_VOID:
		case RID_DFLOAT32:
		case RID_DFLOAT64:
		case RID_DFLOAT128:
		case RID_BOOL:
		case RID_FRACT:
		case RID_ACCUM:
		case RID_SAT:
		  ok = true;
		  break;
		default:
		  ok = false;
		  break;
		}
	      if (!ok)
		break;
	      /* Accept __attribute__((__const)) as __attribute__((const))
		 etc.  */
	      attr_name
		= ridpointers[(int) c_parser_peek_token (parser)->keyword];
	    }
	  else
	    attr_name = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  /* Without a '(' the attribute has no arguments.  */
	  if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN))
	    {
	      attr = build_tree_list (attr_name, NULL_TREE);
	      attrs = chainon (attrs, attr);
	      continue;
	    }
	  c_parser_consume_token (parser);
	  /* Parse the attribute contents.  If they start with an
	     identifier which is followed by a comma or close
	     parenthesis, then the arguments start with that
	     identifier; otherwise they are an expression list.
	     In objective-c the identifier may be a classname.  */
	  if (c_parser_next_token_is (parser, CPP_NAME)
	      && (c_parser_peek_token (parser)->id_kind == C_ID_ID
		  || (c_dialect_objc ()
		      && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))
	      && ((c_parser_peek_2nd_token (parser)->type == CPP_COMMA)
		  || (c_parser_peek_2nd_token (parser)->type
		      == CPP_CLOSE_PAREN)))
	    {
	      /* Leading identifier argument, optionally followed by
		 an expression list.  */
	      tree arg1 = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
		attr_args = build_tree_list (NULL_TREE, arg1);
	      else
		{
		  tree tree_list;
		  c_parser_consume_token (parser);
		  expr_list = c_parser_expr_list (parser, false, true, NULL);
		  tree_list = build_tree_list_vec (expr_list);
		  attr_args = tree_cons (NULL_TREE, arg1, tree_list);
		  release_tree_vector (expr_list);
		}
	    }
	  else
	    {
	      /* Plain (possibly empty) expression list.  */
	      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
		attr_args = NULL_TREE;
	      else
		{
		  expr_list = c_parser_expr_list (parser, false, true, NULL);
		  attr_args = build_tree_list_vec (expr_list);
		  release_tree_vector (expr_list);
		}
	    }
	  attr = build_tree_list (attr_name, attr_args);
	  /* Close paren of this attrib's argument list.  */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    c_parser_consume_token (parser);
	  else
	    {
	      parser->lex_untranslated_string = false;
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	      return attrs;
	    }
	  attrs = chainon (attrs, attr);
	}
      /* First of the two closing parens of this clause.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	c_parser_consume_token (parser);
      else
	{
	  parser->lex_untranslated_string = false;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return attrs;
	}
      /* Second closing paren.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	c_parser_consume_token (parser);
      else
	{
	  parser->lex_untranslated_string = false;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return attrs;
	}
      parser->lex_untranslated_string = false;
    }
  return attrs;
}
/* Parse a type name (C90 6.5.5, C99 6.7.6).
   type-name:
     specifier-qualifier-list abstract-declarator[opt]
   Returns NULL on a parse error.  */
static struct c_type_name *
c_parser_type_name (c_parser *parser)
{
  struct c_declspecs *declspecs = build_null_declspecs ();
  struct c_declarator *abs_declarator;
  struct c_type_name *type_name;
  bool id_present = false;

  /* Collect the specifier-qualifier-list.  */
  c_parser_declspecs (parser, declspecs, false, true, true, cla_prefer_type);
  if (!declspecs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected specifier-qualifier-list");
      return NULL;
    }
  if (declspecs->type != error_mark_node)
    {
      pending_xref_error ();
      finish_declspecs (declspecs);
    }
  /* Parse the optional abstract declarator.  */
  abs_declarator = c_parser_declarator (parser,
					declspecs->typespec_kind != ctsk_none,
					C_DTR_ABSTRACT, &id_present);
  if (abs_declarator == NULL)
    return NULL;
  type_name = XOBNEW (&parser_obstack, struct c_type_name);
  type_name->specs = declspecs;
  type_name->declarator = abs_declarator;
  return type_name;
}
/* Parse an initializer (C90 6.5.7, C99 6.7.8).
initializer:
assignment-expression
{ initializer-list }
{ initializer-list , }
initializer-list:
designation[opt] initializer
initializer-list , designation[opt] initializer
designation:
designator-list =
designator-list:
designator
designator-list designator
designator:
array-designator
. identifier
array-designator:
[ constant-expression ]
GNU extensions:
initializer:
{ }
designation:
array-designator
identifier :
array-designator:
[ constant-expression ... constant-expression ]
Any expression without commas is accepted in the syntax for the
constant-expressions, with non-constant expressions rejected later.
This function is only used for top-level initializers; for nested
ones, see c_parser_initval. */
static struct c_expr
c_parser_initializer (c_parser *parser)
{
  struct c_expr expr;
  location_t init_loc;

  /* A braced list is handled wholesale by the braced-init parser.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    return c_parser_braced_init (parser, NULL_TREE, false);

  init_loc = c_parser_peek_token (parser)->location;
  expr = c_parser_expr_no_commas (parser, NULL);
  /* String and compound literals initialize as-is; anything else
     undergoes the default function/array conversions.  */
  if (TREE_CODE (expr.value) != STRING_CST
      && TREE_CODE (expr.value) != COMPOUND_LITERAL_EXPR)
    expr = default_function_array_read_conversion (init_loc, expr);
  return expr;
}
/* Parse a braced initializer list.  TYPE is the type specified for a
   compound literal, and NULL_TREE for other initializers and for
   nested braced lists.  NESTED_P is true for nested braced lists,
   false for the list of a compound literal or the list that is the
   top-level initializer in a declaration.  Designator bookkeeping for
   this list lives on a local obstack which is freed on every exit
   path.  */
static struct c_expr
c_parser_braced_init (c_parser *parser, tree type, bool nested_p)
{
  struct c_expr ret;
  struct obstack braced_init_obstack;
  location_t brace_loc = c_parser_peek_token (parser)->location;
  gcc_obstack_init (&braced_init_obstack);
  gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE));
  c_parser_consume_token (parser);
  if (nested_p)
    push_init_level (0, &braced_init_obstack);
  else
    really_start_incremental_init (type);
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      /* Empty "{ }" is a GNU extension (see the grammar above).  */
      pedwarn (brace_loc, OPT_pedantic, "ISO C forbids empty initializer braces");
    }
  else
    {
      /* Parse a non-empty initializer list, possibly with a trailing
	 comma.  */
      while (true)
	{
	  c_parser_initelt (parser, &braced_init_obstack);
	  if (parser->error)
	    break;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	  /* A comma followed by '}' is the trailing-comma case.  */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    break;
	}
    }
  if (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE))
    {
      /* Error recovery: still pop the init level so the incremental
	 initializer state stays balanced.  */
      ret.value = error_mark_node;
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, "expected %<}%>");
      pop_init_level (0, &braced_init_obstack);
      obstack_free (&braced_init_obstack, NULL);
      return ret;
    }
  c_parser_consume_token (parser);
  ret = pop_init_level (0, &braced_init_obstack);
  obstack_free (&braced_init_obstack, NULL);
  return ret;
}
/* Parse a nested initializer, including designators.
   BRACED_INIT_OBSTACK carries the designator bookkeeping of the
   enclosing braced list.  On errors an error_mark element is
   processed so the enclosing list stays consistent.  */
static void
c_parser_initelt (c_parser *parser, struct obstack * braced_init_obstack)
{
  /* Parse any designator or designator list.  A single array
     designator may have the subsequent "=" omitted in GNU C, but a
     longer list or a structure member designator may not.  */
  if (c_parser_next_token_is (parser, CPP_NAME)
      && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
    {
      /* Old-style structure member designator.  */
      set_init_label (c_parser_peek_token (parser)->value,
		      braced_init_obstack);
      /* Use the colon as the error location.  */
      pedwarn (c_parser_peek_2nd_token (parser)->location, OPT_pedantic,
	       "obsolete use of designated initializer with %<:%>");
      c_parser_consume_token (parser);
      c_parser_consume_token (parser);
    }
  else
    {
      /* des_seen is 0 if there have been no designators, 1 if there
	 has been a single array designator and 2 otherwise.  */
      int des_seen = 0;
      /* Location of a designator.  */
      location_t des_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
      while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)
	     || c_parser_next_token_is (parser, CPP_DOT))
	{
	  int des_prev = des_seen;
	  if (!des_seen)
	    des_loc = c_parser_peek_token (parser)->location;
	  if (des_seen < 2)
	    des_seen++;
	  if (c_parser_next_token_is (parser, CPP_DOT))
	    {
	      /* ".member" designator.  */
	      des_seen = 2;
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is (parser, CPP_NAME))
		{
		  set_init_label (c_parser_peek_token (parser)->value,
				  braced_init_obstack);
		  c_parser_consume_token (parser);
		}
	      else
		{
		  /* Error recovery: emit an erroneous element and
		     skip to the next comma.  */
		  struct c_expr init;
		  init.value = error_mark_node;
		  init.original_code = ERROR_MARK;
		  init.original_type = NULL;
		  c_parser_error (parser, "expected identifier");
		  c_parser_skip_until_found (parser, CPP_COMMA, NULL);
		  process_init_element (init, false, braced_init_obstack);
		  return;
		}
	    }
	  else
	    {
	      tree first, second;
	      location_t ellipsis_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
	      /* ??? Following the old parser, [ objc-receiver
		 objc-message-args ] is accepted as an initializer,
		 being distinguished from a designator by what follows
		 the first assignment expression inside the square
		 brackets, but after a first array designator a
		 subsequent square bracket is for Objective-C taken to
		 start an expression, using the obsolete form of
		 designated initializer without '=', rather than
		 possibly being a second level of designation: in LALR
		 terms, the '[' is shifted rather than reducing
		 designator to designator-list.  */
	      if (des_prev == 1 && c_dialect_objc ())
		{
		  des_seen = des_prev;
		  break;
		}
	      if (des_prev == 0 && c_dialect_objc ())
		{
		  /* This might be an array designator or an
		     Objective-C message expression.  If the former,
		     continue parsing here; if the latter, parse the
		     remainder of the initializer given the starting
		     primary-expression.  ??? It might make sense to
		     distinguish when des_prev == 1 as well; see
		     previous comment.  */
		  tree rec, args;
		  struct c_expr mexpr;
		  c_parser_consume_token (parser);
		  if (c_parser_peek_token (parser)->type == CPP_NAME
		      && ((c_parser_peek_token (parser)->id_kind
			   == C_ID_TYPENAME)
			  || (c_parser_peek_token (parser)->id_kind
			      == C_ID_CLASSNAME)))
		    {
		      /* Type name receiver.  */
		      tree id = c_parser_peek_token (parser)->value;
		      c_parser_consume_token (parser);
		      rec = objc_get_class_reference (id);
		      goto parse_message_args;
		    }
		  first = c_parser_expr_no_commas (parser, NULL).value;
		  mark_exp_read (first);
		  /* "..." or "]" after the first expression means this
		     was an array designator after all.  */
		  if (c_parser_next_token_is (parser, CPP_ELLIPSIS)
		      || c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
		    goto array_desig_after_first;
		  /* Expression receiver.  So far only one part
		     without commas has been parsed; there might be
		     more of the expression.  */
		  rec = first;
		  while (c_parser_next_token_is (parser, CPP_COMMA))
		    {
		      struct c_expr next;
		      location_t comma_loc, exp_loc;
		      comma_loc = c_parser_peek_token (parser)->location;
		      c_parser_consume_token (parser);
		      exp_loc = c_parser_peek_token (parser)->location;
		      next = c_parser_expr_no_commas (parser, NULL);
		      next = default_function_array_read_conversion (exp_loc,
								     next);
		      rec = build_compound_expr (comma_loc, rec, next.value);
		    }
		parse_message_args:
		  /* Now parse the objc-message-args.  */
		  args = c_parser_objc_message_args (parser);
		  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
					     "expected %<]%>");
		  mexpr.value
		    = objc_build_message_expr (build_tree_list (rec, args));
		  mexpr.original_code = ERROR_MARK;
		  mexpr.original_type = NULL;
		  /* Now parse and process the remainder of the
		     initializer, starting with this message
		     expression as a primary-expression.  */
		  c_parser_initval (parser, &mexpr, braced_init_obstack);
		  return;
		}
	      c_parser_consume_token (parser);
	      first = c_parser_expr_no_commas (parser, NULL).value;
	      mark_exp_read (first);
	    array_desig_after_first:
	      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
		{
		  /* GNU array-range designator "[first ... second]".  */
		  ellipsis_loc = c_parser_peek_token (parser)->location;
		  c_parser_consume_token (parser);
		  second = c_parser_expr_no_commas (parser, NULL).value;
		  mark_exp_read (second);
		}
	      else
		second = NULL_TREE;
	      if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
		{
		  c_parser_consume_token (parser);
		  set_init_index (first, second, braced_init_obstack);
		  if (second)
		    pedwarn (ellipsis_loc, OPT_pedantic,
			     "ISO C forbids specifying range of elements to initialize");
		}
	      else
		c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
					   "expected %<]%>");
	    }
	}
      if (des_seen >= 1)
	{
	  if (c_parser_next_token_is (parser, CPP_EQ))
	    {
	      if (!flag_isoc99)
		pedwarn (des_loc, OPT_pedantic,
			 "ISO C90 forbids specifying subobject to initialize");
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      /* Omitted "=": tolerated (with pedwarn) after a single
		 array designator, an error otherwise.  */
	      if (des_seen == 1)
		pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
			 "obsolete use of designated initializer without %<=%>");
	      else
		{
		  struct c_expr init;
		  init.value = error_mark_node;
		  init.original_code = ERROR_MARK;
		  init.original_type = NULL;
		  c_parser_error (parser, "expected %<=%>");
		  c_parser_skip_until_found (parser, CPP_COMMA, NULL);
		  process_init_element (init, false, braced_init_obstack);
		  return;
		}
	    }
	}
    }
  c_parser_initval (parser, NULL, braced_init_obstack);
}
/* Parse a nested initializer; as c_parser_initializer but parses
   initializers within braced lists, after any designators have been
   applied.  If AFTER is not NULL then it is an Objective-C message
   expression which is the primary-expression starting the
   initializer.  */
static void
c_parser_initval (c_parser *parser, struct c_expr *after,
		  struct obstack * braced_init_obstack)
{
  struct c_expr value;

  /* AFTER is only meaningful when compiling Objective-C.  */
  gcc_assert (!after || c_dialect_objc ());
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE) && !after)
    value = c_parser_braced_init (parser, NULL_TREE, true);
  else
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      value = c_parser_expr_no_commas (parser, after);
      /* String and compound literals are kept as-is; everything else
	 gets the default function/array conversions.  */
      if (value.value != NULL_TREE
	  && TREE_CODE (value.value) != STRING_CST
	  && TREE_CODE (value.value) != COMPOUND_LITERAL_EXPR)
	value = default_function_array_read_conversion (expr_loc, value);
    }
  process_init_element (value, false, braced_init_obstack);
}
/* Parse a compound statement (possibly a function body) (C90 6.6.2,
C99 6.8.2).
compound-statement:
{ block-item-list[opt] }
{ label-declarations block-item-list }
block-item-list:
block-item
block-item-list block-item
block-item:
nested-declaration
statement
nested-declaration:
declaration
GNU extensions:
compound-statement:
{ label-declarations block-item-list }
nested-declaration:
__extension__ nested-declaration
nested-function-definition
label-declarations:
label-declaration
label-declarations label-declaration
label-declaration:
__label__ identifier-list ;
Allowing the mixing of declarations and code is new in C99. The
GNU syntax also permits (not shown above) labels at the end of
compound statements, which yield an error. We don't allow labels
on declarations; this might seem like a natural extension, but
there would be a conflict between attributes on the label and
prefix attributes on the declaration. ??? The syntax follows the
old parser in requiring something after label declarations.
Although they are erroneous if the labels declared aren't defined,
is it useful for the syntax to be this way?
OpenMP:
block-item:
openmp-directive
openmp-directive:
barrier-directive
flush-directive */
static tree
c_parser_compound_statement (c_parser *parser)
{
  location_t open_loc = c_parser_peek_token (parser)->location;
  tree block;

  if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    {
      /* Ensure a scope is entered and left anyway to avoid confusion
	 if we have just prepared to enter a function body.  */
      block = c_begin_compound_stmt (true);
      c_end_compound_stmt (open_loc, block, true);
      return error_mark_node;
    }
  block = c_begin_compound_stmt (true);
  c_parser_compound_statement_nostart (parser);
  return c_end_compound_stmt (open_loc, block, true);
}
/* Parse a compound statement except for the opening brace.  This is
   used for parsing both compound statements and statement expressions
   (which follow different paths to handling the opening).  */
static void
c_parser_compound_statement_nostart (c_parser *parser)
{
  /* Track whether the last thing parsed was a statement (drives the
     C90 mixed-declarations diagnostic) or a label (a label may not
     end the compound statement).  */
  bool last_stmt = false;
  bool last_label = false;
  bool save_valid_for_pragma = valid_location_for_stdc_pragma_p ();
  location_t label_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      /* Empty "{}" body.  */
      c_parser_consume_token (parser);
      return;
    }
  mark_valid_location_for_stdc_pragma (true);
  if (c_parser_next_token_is_keyword (parser, RID_LABEL))
    {
      /* Read zero or more forward-declarations for labels that nested
	 functions can jump to.  */
      mark_valid_location_for_stdc_pragma (false);
      while (c_parser_next_token_is_keyword (parser, RID_LABEL))
	{
	  label_loc = c_parser_peek_token (parser)->location;
	  c_parser_consume_token (parser);
	  /* Any identifiers, including those declared as type names,
	     are OK here.  */
	  while (true)
	    {
	      tree label;
	      if (c_parser_next_token_is_not (parser, CPP_NAME))
		{
		  c_parser_error (parser, "expected identifier");
		  break;
		}
	      label
		= declare_label (c_parser_peek_token (parser)->value);
	      C_DECLARED_LABEL_FLAG (label) = 1;
	      add_stmt (build_stmt (label_loc, DECL_EXPR, label));
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is (parser, CPP_COMMA))
		c_parser_consume_token (parser);
	      else
		break;
	    }
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	}
      pedwarn (label_loc, OPT_pedantic, "ISO C forbids label declarations");
    }
  /* We must now have at least one statement, label or declaration.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      mark_valid_location_for_stdc_pragma (save_valid_for_pragma);
      c_parser_error (parser, "expected declaration or statement");
      c_parser_consume_token (parser);
      return;
    }
  while (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE))
    {
      location_t loc = c_parser_peek_token (parser)->location;
      if (c_parser_next_token_is_keyword (parser, RID_CASE)
	  || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	  || (c_parser_next_token_is (parser, CPP_NAME)
	      && c_parser_peek_2nd_token (parser)->type == CPP_COLON))
	{
	  /* Label: for "case", record the location of the expression
	     after the keyword rather than the keyword itself.  */
	  if (c_parser_next_token_is_keyword (parser, RID_CASE))
	    label_loc = c_parser_peek_2nd_token (parser)->location;
	  else
	    label_loc = c_parser_peek_token (parser)->location;
	  last_label = true;
	  last_stmt = false;
	  mark_valid_location_for_stdc_pragma (false);
	  c_parser_label (parser);
	}
      else if (!last_label
	       && c_parser_next_tokens_start_declaration (parser))
	{
	  last_label = false;
	  mark_valid_location_for_stdc_pragma (false);
	  c_parser_declaration_or_fndef (parser, true, true, true, true, true, NULL);
	  if (last_stmt)
	    pedwarn_c90 (loc,
			 (pedantic && !flag_isoc99)
			 ? OPT_pedantic
			 : OPT_Wdeclaration_after_statement,
			 "ISO C90 forbids mixed declarations and code");
	  last_stmt = false;
	}
      else if (!last_label
	       && c_parser_next_token_is_keyword (parser, RID_EXTENSION))
	{
	  /* __extension__ can start a declaration, but is also an
	     unary operator that can start an expression.  Consume all
	     but the last of a possible series of __extension__ to
	     determine which.  */
	  while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD
		 && (c_parser_peek_2nd_token (parser)->keyword
		     == RID_EXTENSION))
	    c_parser_consume_token (parser);
	  if (c_token_starts_declaration (c_parser_peek_2nd_token (parser)))
	    {
	      int ext;
	      ext = disable_extension_diagnostics ();
	      c_parser_consume_token (parser);
	      last_label = false;
	      mark_valid_location_for_stdc_pragma (false);
	      c_parser_declaration_or_fndef (parser, true, true, true, true,
					     true, NULL);
	      /* Following the old parser, __extension__ does not
		 disable this diagnostic.  */
	      restore_extension_diagnostics (ext);
	      if (last_stmt)
		pedwarn_c90 (loc, (pedantic && !flag_isoc99)
			     ? OPT_pedantic
			     : OPT_Wdeclaration_after_statement,
			     "ISO C90 forbids mixed declarations and code");
	      last_stmt = false;
	    }
	  else
	    goto statement;
	}
      else if (c_parser_next_token_is (parser, CPP_PRAGMA))
	{
	  /* External pragmas, and some omp pragmas, are not associated
	     with regular c code, and so are not to be considered statements
	     syntactically.  This ensures that the user doesn't put them
	     places that would turn into syntax errors if the directive
	     were ignored.  */
	  if (c_parser_pragma (parser, pragma_compound))
	    last_label = false, last_stmt = true;
	}
      else if (c_parser_next_token_is (parser, CPP_EOF))
	{
	  mark_valid_location_for_stdc_pragma (save_valid_for_pragma);
	  c_parser_error (parser, "expected declaration or statement");
	  return;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_ELSE))
	{
	  if (parser->in_if_block)
	    {
	      /* Inside an if-block the "else" belongs to the caller;
		 report the missing brace and bail out.  (A stray ""
		 literal concatenated onto this format string has been
		 removed; the emitted message is unchanged.)  */
	      mark_valid_location_for_stdc_pragma (save_valid_for_pragma);
	      error_at (loc, "expected %<}%> before %<else%>");
	      return;
	    }
	  else
	    {
	      error_at (loc, "%<else%> without a previous %<if%>");
	      c_parser_consume_token (parser);
	      continue;
	    }
	}
      else
	{
	statement:
	  last_label = false;
	  last_stmt = true;
	  mark_valid_location_for_stdc_pragma (false);
	  c_parser_statement_after_labels (parser);
	}
      parser->error = false;
    }
  if (last_label)
    error_at (label_loc, "label at end of compound statement");
  c_parser_consume_token (parser);
  /* Restore the value we started with.  */
  mark_valid_location_for_stdc_pragma (save_valid_for_pragma);
}
/* Parse a label (C90 6.6.1, C99 6.8.1).
   label:
     identifier : attributes[opt]
     case constant-expression :
     default :
   GNU extensions:
   label:
     case constant-expression ... constant-expression :
   The use of attributes on labels is a GNU extension.  The syntax in
   GNU C accepts any expressions without commas, non-constant
   expressions being rejected later.  */
static void
c_parser_label (c_parser *parser)
{
  location_t loc1 = c_parser_peek_token (parser)->location;
  tree label = NULL_TREE;
  if (c_parser_next_token_is_keyword (parser, RID_CASE))
    {
      tree exp1, exp2;
      c_parser_consume_token (parser);
      exp1 = c_parser_expr_no_commas (parser, NULL).value;
      if (c_parser_next_token_is (parser, CPP_COLON))
	{
	  /* Plain "case E:".  */
	  c_parser_consume_token (parser);
	  label = do_case (loc1, exp1, NULL_TREE);
	}
      else if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	{
	  /* GNU case range "case E1 ... E2:".  */
	  c_parser_consume_token (parser);
	  exp2 = c_parser_expr_no_commas (parser, NULL).value;
	  if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	    label = do_case (loc1, exp1, exp2);
	}
      else
	c_parser_error (parser, "expected %<:%> or %<...%>");
    }
  else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    {
      c_parser_consume_token (parser);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	label = do_case (loc1, NULL_TREE, NULL_TREE);
    }
  else
    {
      /* Ordinary "identifier:" label; the caller guarantees the
	 name-colon token shape, hence the asserts.  */
      tree name = c_parser_peek_token (parser)->value;
      tree tlab;
      tree attrs;
      location_t loc2 = c_parser_peek_token (parser)->location;
      gcc_assert (c_parser_next_token_is (parser, CPP_NAME));
      c_parser_consume_token (parser);
      gcc_assert (c_parser_next_token_is (parser, CPP_COLON));
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      tlab = define_label (loc2, name);
      if (tlab)
	{
	  decl_attributes (&tlab, attrs, 0);
	  label = add_stmt (build_stmt (loc1, LABEL_EXPR, tlab));
	}
    }
  if (label)
    {
      if (c_parser_next_tokens_start_declaration (parser))
	{
	  /* A declaration directly after a label is diagnosed, but
	     still parsed for better error recovery.  */
	  error_at (c_parser_peek_token (parser)->location,
		    "a label can only be part of a statement and "
		    "a declaration is not a statement");
	  c_parser_declaration_or_fndef (parser, /*fndef_ok*/ false,
					 /*static_assert_ok*/ true,
					 /*nested*/ true, /*empty_ok*/ false,
					 /*start_attr_ok*/ true, NULL);
	}
    }
}
/* Parse a statement (C90 6.6, C99 6.8).
   statement:
     labeled-statement
     compound-statement
     expression-statement
     selection-statement
     iteration-statement
     jump-statement
   labeled-statement:
     label statement
   expression-statement:
     expression[opt] ;
   selection-statement:
     if-statement
     switch-statement
   iteration-statement:
     while-statement
     do-statement
     for-statement
   jump-statement:
     goto identifier ;
     continue ;
     break ;
     return expression[opt] ;
   GNU extensions:
   statement:
     asm-statement
   jump-statement:
     goto * expression ;
   Objective-C:
   statement:
     objc-throw-statement
     objc-try-catch-statement
     objc-synchronized-statement
   objc-throw-statement:
     @throw expression ;
     @throw ;
   OpenMP:
   statement:
     openmp-construct
   openmp-construct:
     parallel-construct
     for-construct
     sections-construct
     single-construct
     parallel-for-construct
     parallel-sections-construct
     master-construct
     critical-construct
     atomic-construct
     ordered-construct
   parallel-construct:
     parallel-directive structured-block
   for-construct:
     for-directive iteration-statement
   sections-construct:
     sections-directive section-scope
   single-construct:
     single-directive structured-block
   parallel-for-construct:
     parallel-for-directive iteration-statement
   parallel-sections-construct:
     parallel-sections-directive section-scope
   master-construct:
     master-directive structured-block
   critical-construct:
     critical-directive structured-block
   atomic-construct:
     atomic-directive expression-statement
   ordered-construct:
     ordered-directive structured-block

   First consumes any leading labels (case, default, or an identifier
   followed by a colon), then hands off to
   c_parser_statement_after_labels for the statement proper.  */
static void
c_parser_statement (c_parser *parser)
{
  for (;;)
    {
      bool next_is_label
	= (c_parser_next_token_is_keyword (parser, RID_CASE)
	   || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	   || (c_parser_next_token_is (parser, CPP_NAME)
	       && c_parser_peek_2nd_token (parser)->type == CPP_COLON));
      if (!next_is_label)
	break;
      c_parser_label (parser);
    }
  c_parser_statement_after_labels (parser);
}
/* Parse a statement, other than a labeled statement.

   Dispatches on the next token: an open brace starts a compound
   statement; keywords select the selection/iteration/jump statements
   (plus GNU asm and the Objective-C @-statements); a lone semicolon is
   an empty statement; anything else is parsed as an expression
   statement.  parser->in_if_block is saved and cleared for the
   duration so that only the immediate body of an if counts as "inside
   an if" (used by error recovery), and restored on exit.  */
static void
c_parser_statement_after_labels (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree stmt = NULL_TREE;
  bool in_if_block = parser->in_if_block;
  parser->in_if_block = false;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_OPEN_BRACE:
      add_stmt (c_parser_compound_statement (parser));
      break;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_IF:
	  c_parser_if_statement (parser);
	  break;
	case RID_SWITCH:
	  c_parser_switch_statement (parser);
	  break;
	case RID_WHILE:
	  c_parser_while_statement (parser);
	  break;
	case RID_DO:
	  c_parser_do_statement (parser);
	  break;
	case RID_FOR:
	  c_parser_for_statement (parser);
	  break;
	case RID_GOTO:
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      /* Ordinary "goto label;".  */
	      stmt = c_finish_goto_label (loc,
					  c_parser_peek_token (parser)->value);
	      c_parser_consume_token (parser);
	    }
	  else if (c_parser_next_token_is (parser, CPP_MULT))
	    {
	      /* GNU extension: computed goto, "goto *expr;".  */
	      tree val;
	      c_parser_consume_token (parser);
	      val = c_parser_expression (parser).value;
	      mark_exp_read (val);
	      stmt = c_finish_goto_ptr (loc, val);
	    }
	  else
	    c_parser_error (parser, "expected identifier or %<*%>");
	  goto expect_semicolon;
	case RID_CONTINUE:
	  c_parser_consume_token (parser);
	  stmt = c_finish_bc_stmt (loc, &c_cont_label, false);
	  goto expect_semicolon;
	case RID_BREAK:
	  c_parser_consume_token (parser);
	  stmt = c_finish_bc_stmt (loc, &c_break_label, true);
	  goto expect_semicolon;
	case RID_RETURN:
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      /* "return;" with no value.  */
	      stmt = c_finish_return (loc, NULL_TREE, NULL_TREE);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      struct c_expr expr = c_parser_expression_conv (parser);
	      mark_exp_read (expr.value);
	      stmt = c_finish_return (loc, expr.value, expr.original_type);
	      goto expect_semicolon;
	    }
	  break;
	case RID_ASM:
	  /* The asm statement parses its own trailing semicolon.  */
	  stmt = c_parser_asm_statement (parser);
	  break;
	case RID_AT_THROW:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      /* "@throw;" — rethrow the current exception.  */
	      stmt = objc_build_throw_stmt (loc, NULL_TREE);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      tree expr = c_parser_expression (parser).value;
	      expr = c_fully_fold (expr, false, NULL);
	      stmt = objc_build_throw_stmt (loc, expr);
	      goto expect_semicolon;
	    }
	  break;
	case RID_AT_TRY:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_try_catch_finally_statement (parser);
	  break;
	case RID_AT_SYNCHRONIZED:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_synchronized_statement (parser);
	  break;
	default:
	  /* Any other keyword starts an expression statement
	     (e.g. sizeof, __builtin_...).  */
	  goto expr_stmt;
	}
      break;
    case CPP_SEMICOLON:
      /* Empty statement.  */
      c_parser_consume_token (parser);
      break;
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_SQUARE:
      /* Avoid infinite loop in error recovery:
	 c_parser_skip_until_found stops at a closing nesting
	 delimiter without consuming it, but here we need to consume
	 it to proceed further.  */
      c_parser_error (parser, "expected statement");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      c_parser_pragma (parser, pragma_stmt);
      break;
    default:
    expr_stmt:
      stmt = c_finish_expr_stmt (loc, c_parser_expression_conv (parser).value);
    expect_semicolon:
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      break;
    }
  /* Two cases cannot and do not have line numbers associated: If stmt
     is degenerate, such as "2;", then stmt is an INTEGER_CST, which
     cannot hold line numbers.  But that's OK because the statement
     will either be changed to a MODIFY_EXPR during gimplification of
     the statement expr, or discarded.  If stmt was compound, but
     without new variables, we will have skipped the creation of a
     BIND and will have a bare STATEMENT_LIST.  But that's OK because
     (recursively) all of the component statements should already have
     line numbers assigned.  ??? Can we discard no-op statements
     earlier?  */
  if (CAN_HAVE_LOCATION_P (stmt)
      && EXPR_LOCATION (stmt) == UNKNOWN_LOCATION)
    SET_EXPR_LOCATION (stmt, loc);
  parser->in_if_block = in_if_block;
}
/* Parse the controlling expression of an if, do, while or for
   statement: parse the expression, convert it to a truth value, fold
   it, and (under -Wsequence-point) check it for sequence-point
   violations.  Returns the folded condition tree.  */
static tree
c_parser_condition (c_parser *parser)
{
  location_t expr_loc = c_parser_peek_token (parser)->location;
  tree expr = c_parser_expression_conv (parser).value;
  expr = c_objc_common_truthvalue_conversion (expr_loc, expr);
  expr = c_fully_fold (expr, false, NULL);
  if (warn_sequence_point)
    verify_sequence_points (expr);
  return expr;
}
/* Parse a parenthesized condition from an if, do or while statement.
   condition:
     ( expression )
   Returns error_mark_node when the opening parenthesis is missing.  */
static tree
c_parser_paren_condition (c_parser *parser)
{
  tree result = error_mark_node;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      result = c_parser_condition (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  return result;
}
/* Parse a statement which forms its own block scope in C99: open a
   compound statement, parse one statement inside it, and return the
   finished compound statement tree.  */
static tree
c_parser_c99_block_statement (c_parser *parser)
{
  tree outer_block;
  location_t body_loc;
  tree result;
  outer_block = c_begin_compound_stmt (flag_isoc99);
  body_loc = c_parser_peek_token (parser)->location;
  c_parser_statement (parser);
  result = c_end_compound_stmt (body_loc, outer_block, flag_isoc99);
  return result;
}
/* Parse the body of an if statement.  This is just parsing a
   statement but (a) it is a block in C99, (b) we track whether the
   body is an if statement for the sake of -Wparentheses warnings, (c)
   we handle an empty body specially for the sake of -Wempty-body
   warnings, and (d) we call parser_compound_statement directly
   because c_parser_statement_after_labels resets
   parser->in_if_block.  *IF_P is set to true iff the body statement
   is itself an if.  */
static tree
c_parser_if_body (c_parser *parser, bool *if_p)
{
  tree block = c_begin_compound_stmt (flag_isoc99);
  location_t body_loc = c_parser_peek_token (parser)->location;
  /* Consume any labels attached to the body statement before looking
     at the statement itself.  */
  while (c_parser_next_token_is_keyword (parser, RID_CASE)
	 || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	 || (c_parser_next_token_is (parser, CPP_NAME)
	     && c_parser_peek_2nd_token (parser)->type == CPP_COLON))
    c_parser_label (parser);
  /* Record whether the body is itself an if, for -Wparentheses
     dangling-else warnings.  */
  *if_p = c_parser_next_token_is_keyword (parser, RID_IF);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      /* Empty body: add an empty statement, and warn unless the
	 semicolon is immediately followed by "else" (a common
	 deliberate idiom that -Wempty-body leaves alone).  */
      location_t loc = c_parser_peek_token (parser)->location;
      add_stmt (build_empty_stmt (loc));
      c_parser_consume_token (parser);
      if (!c_parser_next_token_is_keyword (parser, RID_ELSE))
	warning_at (loc, OPT_Wempty_body,
		    "suggest braces around empty body in an %<if%> statement");
    }
  else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    /* Call c_parser_compound_statement directly (not via
       c_parser_statement) so parser->in_if_block is preserved.  */
    add_stmt (c_parser_compound_statement (parser));
  else
    c_parser_statement_after_labels (parser);
  return c_end_compound_stmt (body_loc, block, flag_isoc99);
}
/* Parse the else body of an if statement.  This is just parsing a
   statement but (a) it is a block in C99, (b) an empty body is
   handled specially so -Wempty-body can warn about it.  */
static tree
c_parser_else_body (c_parser *parser)
{
  location_t body_loc = c_parser_peek_token (parser)->location;
  tree block = c_begin_compound_stmt (flag_isoc99);
  /* Consume any labels that precede the else arm's statement.  */
  for (;;)
    {
      if (!c_parser_next_token_is_keyword (parser, RID_CASE)
	  && !c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	  && !(c_parser_next_token_is (parser, CPP_NAME)
	       && c_parser_peek_2nd_token (parser)->type == CPP_COLON))
	break;
      c_parser_label (parser);
    }
  if (!c_parser_next_token_is (parser, CPP_SEMICOLON))
    c_parser_statement_after_labels (parser);
  else
    {
      /* Empty else body: warn and substitute an empty statement.  */
      location_t semi_loc = c_parser_peek_token (parser)->location;
      warning_at (semi_loc,
		  OPT_Wempty_body,
		  "suggest braces around empty body in an %<else%> statement");
      add_stmt (build_empty_stmt (semi_loc));
      c_parser_consume_token (parser);
    }
  return c_end_compound_stmt (body_loc, block, flag_isoc99);
}
/* Parse an if statement (C90 6.6.4, C99 6.8.4).
   if-statement:
     if ( expression ) statement
     if ( expression ) statement else statement

   The whole construct is wrapped in a compound statement so that in
   C99 it forms its own block scope.  parser->in_if_block is set while
   parsing the then-arm so error recovery knows it is directly inside
   an if.  */
static void
c_parser_if_statement (c_parser *parser)
{
  tree outer_block, cond_expr, then_block, else_block;
  location_t if_loc;
  bool nested_if = false;
  bool saved_in_if_block;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_IF));
  c_parser_consume_token (parser);
  outer_block = c_begin_compound_stmt (flag_isoc99);
  if_loc = c_parser_peek_token (parser)->location;
  cond_expr = c_parser_paren_condition (parser);
  saved_in_if_block = parser->in_if_block;
  parser->in_if_block = true;
  then_block = c_parser_if_body (parser, &nested_if);
  parser->in_if_block = saved_in_if_block;
  else_block = NULL_TREE;
  if (c_parser_next_token_is_keyword (parser, RID_ELSE))
    {
      c_parser_consume_token (parser);
      else_block = c_parser_else_body (parser);
    }
  /* nested_if tells c_finish_if_stmt whether the then-arm was itself
     an if, for -Wparentheses dangling-else warnings.  */
  c_finish_if_stmt (if_loc, cond_expr, then_block, else_block, nested_if);
  add_stmt (c_end_compound_stmt (if_loc, outer_block, flag_isoc99));
}
/* Parse a switch statement (C90 6.6.4, C99 6.8.4).
   switch-statement:
     switch (expression) statement

   The global c_break_label is saved, cleared while the body is
   parsed, and restored afterwards; if the body used "break", the
   collected break label is emitted after the switch body.  */
static void
c_parser_switch_statement (c_parser *parser)
{
  tree block, expr, body, save_break;
  location_t switch_loc = c_parser_peek_token (parser)->location;
  location_t switch_cond_loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_SWITCH));
  c_parser_consume_token (parser);
  block = c_begin_compound_stmt (flag_isoc99);
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      switch_cond_loc = c_parser_peek_token (parser)->location;
      expr = c_parser_expression (parser).value;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  else
    {
      /* Missing open paren: carry on with an error condition so the
	 body is still parsed for further diagnostics.  */
      switch_cond_loc = UNKNOWN_LOCATION;
      expr = error_mark_node;
    }
  c_start_case (switch_loc, switch_cond_loc, expr);
  /* Give the body a fresh break label; "break" inside it sets
     c_break_label.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  c_finish_case (body);
  if (c_break_label)
    {
      /* The body contained a break: emit the label it jumps to.  */
      location_t here = c_parser_peek_token (parser)->location;
      tree t = build1 (LABEL_EXPR, void_type_node, c_break_label);
      SET_EXPR_LOCATION (t, here);
      add_stmt (t);
    }
  c_break_label = save_break;
  add_stmt (c_end_compound_stmt (switch_loc, block, flag_isoc99));
}
/* Parse a while statement (C90 6.6.5, C99 6.8.5).
   while-statement:
     while (expression) statement

   The globals c_break_label and c_cont_label are saved and cleared
   around the body so that break/continue inside it bind to this loop;
   the labels collected are handed to c_finish_loop.  */
static void
c_parser_while_statement (c_parser *parser)
{
  tree block, cond, body, save_break, save_cont;
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_WHILE));
  c_parser_consume_token (parser);
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  cond = c_parser_paren_condition (parser);
  /* Fresh break/continue labels for this loop's body.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  /* Last argument true: the condition is tested at the top of the
     loop.  */
  c_finish_loop (loc, cond, NULL, body, c_break_label, c_cont_label, true);
  add_stmt (c_end_compound_stmt (loc, block, flag_isoc99));
  c_break_label = save_break;
  c_cont_label = save_cont;
}
/* Parse a do statement (C90 6.6.5, C99 6.8.5).
   do-statement:
     do statement while ( expression ) ;

   The break/continue labels collected from the body must be captured
   into NEW_BREAK/NEW_CONT *before* the condition is parsed, since the
   condition is outside the body and the globals are restored first.  */
static void
c_parser_do_statement (c_parser *parser)
{
  tree block, cond, body, save_break, save_cont, new_break, new_cont;
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_DO));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    warning_at (c_parser_peek_token (parser)->location,
		OPT_Wempty_body,
		"suggest braces around empty body in %<do%> statement");
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  /* Fresh break/continue labels for this loop's body.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  c_parser_require_keyword (parser, RID_WHILE, "expected %<while%>");
  /* Capture this loop's labels and restore the outer ones before the
     condition is parsed.  */
  new_break = c_break_label;
  c_break_label = save_break;
  new_cont = c_cont_label;
  c_cont_label = save_cont;
  cond = c_parser_paren_condition (parser);
  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);
  /* Last argument false: the condition is tested at the bottom.  */
  c_finish_loop (loc, cond, NULL, body, new_break, new_cont, false);
  add_stmt (c_end_compound_stmt (loc, block, flag_isoc99));
}
/* Parse a for statement (C90 6.6.5, C99 6.8.5).
   for-statement:
     for ( expression[opt] ; expression[opt] ; expression[opt] ) statement
     for ( nested-declaration expression[opt] ; expression[opt] ) statement
   The form with a declaration is new in C99.
   ??? In accordance with the old parser, the declaration may be a
   nested function, which is then rejected in check_for_loop_decls,
   but does it make any sense for this to be included in the grammar?
   Note in particular that the nested function does not include a
   trailing ';', whereas the "declaration" production includes one.
   Also, can we reject bad declarations earlier and cheaper than
   check_for_loop_decls?
   In Objective-C, there are two additional variants:
   foreach-statement:
     for ( expression in expression ) statement
     for ( declaration in expression ) statement
   This is inconsistent with C, because the second variant is allowed
   even if c99 is not enabled.
   The rest of the comment documents these Objective-C foreach-statement.
   Here is the canonical example of the first variant:
    for (object in array) { do something with object }
   we call the first expression ("object") the "object_expression" and
   the second expression ("array") the "collection_expression".
   object_expression must be an lvalue of type "id" (a generic Objective-C
   object) because the loop works by assigning to object_expression the
   various objects from the collection_expression.  collection_expression
   must evaluate to something of type "id" which responds to the method
   countByEnumeratingWithState:objects:count:.
   The canonical example of the second variant is:
    for (id object in array) { do something with object }
   which is completely equivalent to
    {
      id object;
      for (object in array) { do something with object }
    }
   Note that initializing 'object' in some way (eg, "for ((object =
   xxx) in array) { do something with object }") is possibly
   technically valid, but completely pointless as 'object' will be
   assigned to something else as soon as the loop starts.  We should
   most likely reject it (TODO).
   The beginning of the Objective-C foreach-statement looks exactly
   like the beginning of the for-statement, and we can tell it is a
   foreach-statement only because the initial declaration or
   expression is terminated by 'in' instead of ';'.
*/
static void
c_parser_for_statement (c_parser *parser)
{
  tree block, cond, incr, save_break, save_cont, body;
  /* The following are only used when parsing an ObjC foreach statement.  */
  tree object_expression;
  /* Silence the bogus uninitialized warning.  */
  tree collection_expression = NULL;
  location_t loc = c_parser_peek_token (parser)->location;
  location_t for_loc = c_parser_peek_token (parser)->location;
  bool is_foreach_statement = false;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_FOR));
  c_parser_consume_token (parser);
  /* Open a compound statement in Objective-C as well, just in case this is
     a foreach expression.  */
  block = c_begin_compound_stmt (flag_isoc99 || c_dialect_objc ());
  cond = error_mark_node;
  incr = error_mark_node;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      /* Parse the initialization declaration or expression.  */
      object_expression = error_mark_node;
      parser->objc_could_be_foreach_context = c_dialect_objc ();
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  /* Empty initialization clause.  */
	  parser->objc_could_be_foreach_context = false;
	  c_parser_consume_token (parser);
	  c_finish_expr_stmt (loc, NULL_TREE);
	}
      else if (c_parser_next_tokens_start_declaration (parser))
	{
	  /* C99-style declaration in the initialization clause; may
	     turn out to be an ObjC foreach if 'in' follows.  */
	  c_parser_declaration_or_fndef (parser, true, true, true, true, true,
					 &object_expression);
	  parser->objc_could_be_foreach_context = false;
	  if (c_parser_next_token_is_keyword (parser, RID_IN))
	    {
	      c_parser_consume_token (parser);
	      is_foreach_statement = true;
	      if (check_for_loop_decls (for_loc, true) == NULL_TREE)
		c_parser_error (parser, "multiple iterating variables in fast enumeration");
	    }
	  else
	    check_for_loop_decls (for_loc, flag_isoc99);
	}
      else if (c_parser_next_token_is_keyword (parser, RID_EXTENSION))
	{
	  /* __extension__ can start a declaration, but is also an
	     unary operator that can start an expression.  Consume all
	     but the last of a possible series of __extension__ to
	     determine which.  */
	  while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD
		 && (c_parser_peek_2nd_token (parser)->keyword
		     == RID_EXTENSION))
	    c_parser_consume_token (parser);
	  if (c_token_starts_declaration (c_parser_peek_2nd_token (parser)))
	    {
	      int ext;
	      ext = disable_extension_diagnostics ();
	      c_parser_consume_token (parser);
	      c_parser_declaration_or_fndef (parser, true, true, true, true,
					     true, &object_expression);
	      parser->objc_could_be_foreach_context = false;
	      restore_extension_diagnostics (ext);
	      if (c_parser_next_token_is_keyword (parser, RID_IN))
		{
		  c_parser_consume_token (parser);
		  is_foreach_statement = true;
		  if (check_for_loop_decls (for_loc, true) == NULL_TREE)
		    c_parser_error (parser, "multiple iterating variables in fast enumeration");
		}
	      else
		check_for_loop_decls (for_loc, flag_isoc99);
	    }
	  else
	    goto init_expr;
	}
      else
	{
	init_expr:
	  {
	    /* Plain expression initialization; may still turn out to
	       be an ObjC foreach if 'in' follows.  */
	    tree init_expression;
	    init_expression = c_parser_expression (parser).value;
	    parser->objc_could_be_foreach_context = false;
	    if (c_parser_next_token_is_keyword (parser, RID_IN))
	      {
		c_parser_consume_token (parser);
		is_foreach_statement = true;
		if (! lvalue_p (init_expression))
		  c_parser_error (parser, "invalid iterating variable in fast enumeration");
		object_expression = c_fully_fold (init_expression, false, NULL);
	      }
	    else
	      {
		c_finish_expr_stmt (loc, init_expression);
		c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	      }
	  }
	}
      /* Parse the loop condition.  In the case of a foreach
	 statement, there is no loop condition.  */
      gcc_assert (!parser->objc_could_be_foreach_context);
      if (!is_foreach_statement)
	{
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      c_parser_consume_token (parser);
	      cond = NULL_TREE;
	    }
	  else
	    {
	      cond = c_parser_condition (parser);
	      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	    }
	}
      /* Parse the increment expression (the third expression in a
	 for-statement).  In the case of a foreach-statement, this is
	 the expression that follows the 'in'.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  if (is_foreach_statement)
	    {
	      c_parser_error (parser, "missing collection in fast enumeration");
	      collection_expression = error_mark_node;
	    }
	  else
	    incr = c_process_expr_stmt (loc, NULL_TREE);
	}
      else
	{
	  if (is_foreach_statement)
	    collection_expression = c_fully_fold (c_parser_expression (parser).value,
						  false, NULL);
	  else
	    incr = c_process_expr_stmt (loc, c_parser_expression (parser).value);
	}
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  /* Fresh break/continue labels for this loop's body.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  if (is_foreach_statement)
    objc_finish_foreach_loop (loc, object_expression, collection_expression, body, c_break_label, c_cont_label);
  else
    c_finish_loop (loc, cond, incr, body, c_break_label, c_cont_label, true);
  add_stmt (c_end_compound_stmt (loc, block, flag_isoc99 || c_dialect_objc ()));
  c_break_label = save_break;
  c_cont_label = save_cont;
}
/* Parse an asm statement, a GNU extension.  This is a full-blown asm
   statement with inputs, outputs, clobbers, and volatile tag
   allowed.
   asm-statement:
     asm type-qualifier[opt] ( asm-argument ) ;
     asm type-qualifier[opt] goto ( asm-goto-argument ) ;
   asm-argument:
     asm-string-literal
     asm-string-literal : asm-operands[opt]
     asm-string-literal : asm-operands[opt] : asm-operands[opt]
     asm-string-literal : asm-operands[opt] : asm-operands[opt] : asm-clobbers[opt]
   asm-goto-argument:
     asm-string-literal : : asm-operands[opt] : asm-clobbers[opt] \
       : asm-goto-operands
   Qualifiers other than volatile are accepted in the syntax but
   warned for.

   Returns the finished asm statement, or NULL on error (the ERROR
   label falls through the RET = NULL initialization).  */
static tree
c_parser_asm_statement (c_parser *parser)
{
  tree quals, str, outputs, inputs, clobbers, labels, ret;
  bool simple, is_goto;
  location_t asm_loc = c_parser_peek_token (parser)->location;
  int section, nsections;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_keyword (parser, RID_VOLATILE))
    {
      /* Only the volatile qualifier is meaningful on asm.  */
      quals = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else if (c_parser_next_token_is_keyword (parser, RID_CONST)
	   || c_parser_next_token_is_keyword (parser, RID_RESTRICT))
    {
      /* const/restrict are accepted syntactically but ignored.  */
      warning_at (c_parser_peek_token (parser)->location,
		  0,
		  "%E qualifier ignored on asm",
		  c_parser_peek_token (parser)->value);
      quals = NULL_TREE;
      c_parser_consume_token (parser);
    }
  else
    quals = NULL_TREE;
  is_goto = false;
  if (c_parser_next_token_is_keyword (parser, RID_GOTO))
    {
      c_parser_consume_token (parser);
      is_goto = true;
    }
  /* ??? Follow the C++ parser rather than using the
     lex_untranslated_string kludge.  */
  parser->lex_untranslated_string = true;
  ret = NULL;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    goto error;
  str = c_parser_asm_string_literal (parser);
  if (str == NULL_TREE)
    goto error_close_paren;
  simple = true;
  outputs = NULL_TREE;
  inputs = NULL_TREE;
  clobbers = NULL_TREE;
  labels = NULL_TREE;
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto)
    goto done_asm;
  /* Parse each colon-delimited section of operands.  asm goto has a
     fourth (labels) section and requires all colons present.  */
  nsections = 3 + is_goto;
  for (section = 0; section < nsections; ++section)
    {
      if (!c_parser_require (parser, CPP_COLON,
			     is_goto
			     ? "expected %<:%>"
			     : "expected %<:%> or %<)%>"))
	goto error_close_paren;
      /* Once past any colon, we're no longer a simple asm.  */
      simple = false;
      if ((!c_parser_next_token_is (parser, CPP_COLON)
	   && !c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	  || section == 3)
	switch (section)
	  {
	  case 0:
	    /* For asm goto, we don't allow output operands, but reserve
	       the slot for a future extension that does allow them.  */
	    if (!is_goto)
	      outputs = c_parser_asm_operands (parser, false);
	    break;
	  case 1:
	    inputs = c_parser_asm_operands (parser, true);
	    break;
	  case 2:
	    clobbers = c_parser_asm_clobbers (parser);
	    break;
	  case 3:
	    labels = c_parser_asm_goto_operands (parser);
	    break;
	  default:
	    gcc_unreachable ();
	  }
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto)
	goto done_asm;
    }
 done_asm:
  if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      goto error;
    }
  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);
  ret = build_asm_stmt (quals, build_asm_expr (asm_loc, str, outputs, inputs,
					       clobbers, labels, simple));
 error:
  /* All exits must clear the untranslated-string lexing mode.  */
  parser->lex_untranslated_string = false;
  return ret;
 error_close_paren:
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
  goto error;
}
/* Parse asm operands, a GNU extension.  If CONVERT_P (for inputs but
   not outputs), apply the default conversion of functions and arrays
   to pointers.
   asm-operands:
     asm-operand
     asm-operands , asm-operand
   asm-operand:
     asm-string-literal ( expression )
     [ identifier ] asm-string-literal ( expression )

   Returns a TREE_LIST of operands, or NULL_TREE on error.  Note that
   the string literal and constraint are lexed in untranslated mode;
   that mode is turned off around the parenthesized expression and
   back on afterwards.  */
static tree
c_parser_asm_operands (c_parser *parser, bool convert_p)
{
  tree list = NULL_TREE;
  location_t loc;
  while (true)
    {
      tree name, str;
      struct c_expr expr;
      if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
	{
	  /* Optional symbolic operand name: [ identifier ].  */
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      tree id = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	      name = build_string (IDENTIFIER_LENGTH (id),
				   IDENTIFIER_POINTER (id));
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL);
	      return NULL_TREE;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	}
      else
	name = NULL_TREE;
      str = c_parser_asm_string_literal (parser);
      if (str == NULL_TREE)
	return NULL_TREE;
      /* The operand expression itself is lexed normally, not in
	 untranslated-string mode.  */
      parser->lex_untranslated_string = false;
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	{
	  parser->lex_untranslated_string = true;
	  return NULL_TREE;
	}
      loc = c_parser_peek_token (parser)->location;
      expr = c_parser_expression (parser);
      mark_exp_read (expr.value);
      if (convert_p)
	expr = default_function_array_conversion (loc, expr);
      expr.value = c_fully_fold (expr.value, false, NULL);
      parser->lex_untranslated_string = true;
      if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return NULL_TREE;
	}
      /* Each operand is ((name, constraint-string), expression).  */
      list = chainon (list, build_tree_list (build_tree_list (name, str),
					     expr.value));
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  return list;
}
/* Parse asm clobbers, a GNU extension.
   asm-clobbers:
     asm-string-literal
     asm-clobbers , asm-string-literal
   Returns a TREE_LIST of clobber strings, or NULL_TREE if any string
   literal fails to parse.  */
static tree
c_parser_asm_clobbers (c_parser *parser)
{
  tree clobbers = NULL_TREE;
  for (;;)
    {
      tree str = c_parser_asm_string_literal (parser);
      if (str == NULL_TREE)
	return NULL_TREE;
      clobbers = tree_cons (NULL_TREE, str, clobbers);
      if (!c_parser_next_token_is (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  return clobbers;
}
/* Parse asm goto labels, a GNU extension.
   asm-goto-operands:
     identifier
     asm-goto-operands , identifier
   Returns a TREE_LIST mapping each label name (as a STRING_CST) to
   its label declaration, in source order; NULL_TREE on error.  */
static tree
c_parser_asm_goto_operands (c_parser *parser)
{
  tree labels = NULL_TREE;
  for (;;)
    {
      c_token *tok;
      tree id, label, name;
      if (!c_parser_next_token_is (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  return NULL_TREE;
	}
      tok = c_parser_peek_token (parser);
      id = tok->value;
      label = lookup_label_for_goto (tok->location, id);
      c_parser_consume_token (parser);
      /* Mark the label used so it is not warned about as unused.  */
      TREE_USED (label) = 1;
      name = build_string (IDENTIFIER_LENGTH (id),
			   IDENTIFIER_POINTER (id));
      labels = tree_cons (name, label, labels);
      if (!c_parser_next_token_is (parser, CPP_COMMA))
	return nreverse (labels);
      c_parser_consume_token (parser);
    }
}
/* Parse an expression other than a compound expression; that is, an
   assignment expression (C90 6.3.16, C99 6.5.16).  If AFTER is not
   NULL then it is an Objective-C message expression which is the
   primary-expression starting the expression as an initializer.
   assignment-expression:
     conditional-expression
     unary-expression assignment-operator assignment-expression
   assignment-operator: one of
     = *= /= %= += -= <<= >>= &= ^= |=
   In GNU C we accept any conditional expression on the LHS and
   diagnose the invalid lvalue rather than producing a syntax
   error.  */
static struct c_expr
c_parser_expr_no_commas (c_parser *parser, struct c_expr *after)
{
  struct c_expr lhs, rhs, result;
  enum tree_code opcode;
  location_t assign_loc, rhs_loc;
  gcc_assert (!after || c_dialect_objc ());
  lhs = c_parser_conditional_expression (parser, after);
  assign_loc = c_parser_peek_token (parser)->location;
  /* Map the assignment operator token onto the tree code of the
     underlying arithmetic operation (NOP_EXPR for plain '=').  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_EQ:	opcode = NOP_EXPR; break;
    case CPP_MULT_EQ:	opcode = MULT_EXPR; break;
    case CPP_DIV_EQ:	opcode = TRUNC_DIV_EXPR; break;
    case CPP_MOD_EQ:	opcode = TRUNC_MOD_EXPR; break;
    case CPP_PLUS_EQ:	opcode = PLUS_EXPR; break;
    case CPP_MINUS_EQ:	opcode = MINUS_EXPR; break;
    case CPP_LSHIFT_EQ:	opcode = LSHIFT_EXPR; break;
    case CPP_RSHIFT_EQ:	opcode = RSHIFT_EXPR; break;
    case CPP_AND_EQ:	opcode = BIT_AND_EXPR; break;
    case CPP_XOR_EQ:	opcode = BIT_XOR_EXPR; break;
    case CPP_OR_EQ:	opcode = BIT_IOR_EXPR; break;
    default:
      /* Not an assignment operator: the conditional expression is the
	 whole assignment-expression.  */
      return lhs;
    }
  c_parser_consume_token (parser);
  rhs_loc = c_parser_peek_token (parser)->location;
  /* Assignment is right-associative: recurse for the RHS.  */
  rhs = c_parser_expr_no_commas (parser, NULL);
  rhs = default_function_array_read_conversion (rhs_loc, rhs);
  result.value = build_modify_expr (assign_loc, lhs.value, lhs.original_type,
				    opcode, rhs_loc, rhs.value,
				    rhs.original_type);
  if (opcode == NOP_EXPR)
    result.original_code = MODIFY_EXPR;
  else
    {
      /* Suppress warnings on the expression built for compound
	 assignments.  */
      TREE_NO_WARNING (result.value) = 1;
      result.original_code = ERROR_MARK;
    }
  result.original_type = NULL;
  return result;
}
/* Parse a conditional expression (C90 6.3.15, C99 6.5.15).  If AFTER
   is not NULL then it is an Objective-C message expression which is
   the primary-expression starting the expression as an initializer.
   conditional-expression:
     logical-OR-expression
     logical-OR-expression ? expression : conditional-expression
   GNU extensions:
   conditional-expression:
     logical-OR-expression ? : conditional-expression

   c_inhibit_evaluation_warnings is incremented while parsing the arm
   that a constant condition makes unreachable, and decremented again
   afterwards, so no warnings are issued for the dead arm.  */
static struct c_expr
c_parser_conditional_expression (c_parser *parser, struct c_expr *after)
{
  struct c_expr cond, exp1, exp2, ret;
  location_t cond_loc, colon_loc, middle_loc;
  gcc_assert (!after || c_dialect_objc ());
  cond = c_parser_binary_expression (parser, after);
  if (c_parser_next_token_is_not (parser, CPP_QUERY))
    return cond;
  /* cond_loc is the location of the '?' token.  */
  cond_loc = c_parser_peek_token (parser)->location;
  cond = default_function_array_read_conversion (cond_loc, cond);
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      /* GNU extension "cond ? : exp2": the condition doubles as the
	 middle operand.  */
      tree eptype = NULL_TREE;
      middle_loc = c_parser_peek_token (parser)->location;
      pedwarn (middle_loc, OPT_pedantic,
	       "ISO C forbids omitting the middle term of a ?: expression");
      warn_for_omitted_condop (middle_loc, cond.value);
      if (TREE_CODE (cond.value) == EXCESS_PRECISION_EXPR)
	{
	  /* Strip the excess-precision wrapper and restore it on the
	     saved expression below.  */
	  eptype = TREE_TYPE (cond.value);
	  cond.value = TREE_OPERAND (cond.value, 0);
	}
      /* Make sure first operand is calculated only once.  */
      exp1.value = c_save_expr (default_conversion (cond.value));
      if (eptype)
	exp1.value = build1 (EXCESS_PRECISION_EXPR, eptype, exp1.value);
      exp1.original_type = NULL;
      cond.value = c_objc_common_truthvalue_conversion (cond_loc, exp1.value);
      c_inhibit_evaluation_warnings += cond.value == truthvalue_true_node;
    }
  else
    {
      cond.value
	= c_objc_common_truthvalue_conversion
	(cond_loc, default_conversion (cond.value));
      /* Suppress warnings in the middle operand if the condition is
	 known false (then re-balance for the third operand below).  */
      c_inhibit_evaluation_warnings += cond.value == truthvalue_false_node;
      exp1 = c_parser_expression_conv (parser);
      mark_exp_read (exp1.value);
      c_inhibit_evaluation_warnings +=
	((cond.value == truthvalue_true_node)
	 - (cond.value == truthvalue_false_node));
    }
  colon_loc = c_parser_peek_token (parser)->location;
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    {
      c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node;
      ret.value = error_mark_node;
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      return ret;
    }
  {
    location_t exp2_loc = c_parser_peek_token (parser)->location;
    exp2 = c_parser_conditional_expression (parser, NULL);
    exp2 = default_function_array_read_conversion (exp2_loc, exp2);
  }
  c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node;
  ret.value = build_conditional_expr (colon_loc, cond.value,
				      cond.original_code == C_MAYBE_CONST_EXPR,
				      exp1.value, exp1.original_type,
				      exp2.value, exp2.original_type);
  ret.original_code = ERROR_MARK;
  if (exp1.value == error_mark_node || exp2.value == error_mark_node)
    ret.original_type = NULL;
  else
    {
      tree t1, t2;
      /* If both sides are enum type, the default conversion will have
	 made the type of the result be an integer type.  We want to
	 remember the enum types we started with.  */
      t1 = exp1.original_type ? exp1.original_type : TREE_TYPE (exp1.value);
      t2 = exp2.original_type ? exp2.original_type : TREE_TYPE (exp2.value);
      ret.original_type = ((t1 != error_mark_node
			    && t2 != error_mark_node
			    && (TYPE_MAIN_VARIANT (t1)
				== TYPE_MAIN_VARIANT (t2)))
			   ? t1
			   : NULL);
    }
  return ret;
}
/* Parse a binary expression; that is, a logical-OR-expression (C90
6.3.5-6.3.14, C99 6.5.5-6.5.14). If AFTER is not NULL then it is
an Objective-C message expression which is the primary-expression
starting the expression as an initializer.
multiplicative-expression:
cast-expression
multiplicative-expression * cast-expression
multiplicative-expression / cast-expression
multiplicative-expression % cast-expression
additive-expression:
multiplicative-expression
additive-expression + multiplicative-expression
additive-expression - multiplicative-expression
shift-expression:
additive-expression
shift-expression << additive-expression
shift-expression >> additive-expression
relational-expression:
shift-expression
relational-expression < shift-expression
relational-expression > shift-expression
relational-expression <= shift-expression
relational-expression >= shift-expression
equality-expression:
relational-expression
equality-expression == relational-expression
equality-expression != relational-expression
AND-expression:
equality-expression
AND-expression & equality-expression
exclusive-OR-expression:
AND-expression
exclusive-OR-expression ^ AND-expression
inclusive-OR-expression:
exclusive-OR-expression
inclusive-OR-expression | exclusive-OR-expression
logical-AND-expression:
inclusive-OR-expression
logical-AND-expression && inclusive-OR-expression
logical-OR-expression:
logical-AND-expression
logical-OR-expression || logical-AND-expression
*/
static struct c_expr
c_parser_binary_expression (c_parser *parser, struct c_expr *after)
{
  /* A binary expression is parsed using operator-precedence parsing,
     with the operands being cast expressions.  All the binary
     operators are left-associative.  Thus a binary expression is of
     form:
     E0 op1 E1 op2 E2 ...
     which we represent on a stack.  On the stack, the precedence
     levels are strictly increasing.  When a new operator is
     encountered of higher precedence than that at the top of the
     stack, it is pushed; its LHS is the top expression, and its RHS
     is everything parsed until it is popped.  When a new operator is
     encountered with precedence less than or equal to that at the top
     of the stack, triples E[i-1] op[i] E[i] are popped and replaced
     by the result of the operation until the operator at the top of
     the stack has lower precedence than the new operator or there is
     only one element on the stack; then the top expression is the LHS
     of the new operator.  In the case of logical AND and OR
     expressions, we also need to adjust c_inhibit_evaluation_warnings
     as appropriate when the operators are pushed and popped.  */
  /* The precedence levels, where 0 is a dummy lowest level used for
     the bottom of the stack.  */
  enum prec {
    PREC_NONE,
    PREC_LOGOR,
    PREC_LOGAND,
    PREC_BITOR,
    PREC_BITXOR,
    PREC_BITAND,
    PREC_EQ,
    PREC_REL,
    PREC_SHIFT,
    PREC_ADD,
    PREC_MULT,
    NUM_PRECS
  };
  struct {
    /* The expression at this stack level.  */
    struct c_expr expr;
    /* The precedence of the operator on its left, PREC_NONE at the
       bottom of the stack.  */
    enum prec prec;
    /* The operation on its left.  */
    enum tree_code op;
    /* The source location of this operation.  */
    location_t loc;
  } stack[NUM_PRECS];
  int sp;
  /* Location of the binary operator.  */
  location_t binary_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
  /* Reduce the top two stack entries into one.  For && and || this
     also undoes the c_inhibit_evaluation_warnings adjustment that was
     made when the operator was pushed, if the LHS made the RHS
     unevaluated.  */
#define POP								      \
  do {									      \
    switch (stack[sp].op)						      \
      {									      \
      case TRUTH_ANDIF_EXPR:						      \
	c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value	      \
					  == truthvalue_false_node);	      \
	break;								      \
      case TRUTH_ORIF_EXPR:						      \
	c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value	      \
					  == truthvalue_true_node);	      \
	break;								      \
      default:								      \
	break;								      \
      }									      \
    stack[sp - 1].expr							      \
      = default_function_array_read_conversion (stack[sp - 1].loc,	      \
						stack[sp - 1].expr);	      \
    stack[sp].expr							      \
      = default_function_array_read_conversion (stack[sp].loc,		      \
						stack[sp].expr);	      \
    stack[sp - 1].expr = parser_build_binary_op (stack[sp].loc,		      \
						 stack[sp].op,		      \
						 stack[sp - 1].expr,	      \
						 stack[sp].expr);	      \
    sp--;								      \
  } while (0)
  gcc_assert (!after || c_dialect_objc ());
  stack[0].loc = c_parser_peek_token (parser)->location;
  stack[0].expr = c_parser_cast_expression (parser, after);
  stack[0].prec = PREC_NONE;
  sp = 0;
  while (true)
    {
      enum prec oprec;
      enum tree_code ocode;
      if (parser->error)
	goto out;
      /* Map the next token to its precedence and tree code; anything
	 that is not a binary operator terminates the expression.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_MULT:
	  oprec = PREC_MULT;
	  ocode = MULT_EXPR;
	  break;
	case CPP_DIV:
	  oprec = PREC_MULT;
	  ocode = TRUNC_DIV_EXPR;
	  break;
	case CPP_MOD:
	  oprec = PREC_MULT;
	  ocode = TRUNC_MOD_EXPR;
	  break;
	case CPP_PLUS:
	  oprec = PREC_ADD;
	  ocode = PLUS_EXPR;
	  break;
	case CPP_MINUS:
	  oprec = PREC_ADD;
	  ocode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = RSHIFT_EXPR;
	  break;
	case CPP_LESS:
	  oprec = PREC_REL;
	  ocode = LT_EXPR;
	  break;
	case CPP_GREATER:
	  oprec = PREC_REL;
	  ocode = GT_EXPR;
	  break;
	case CPP_LESS_EQ:
	  oprec = PREC_REL;
	  ocode = LE_EXPR;
	  break;
	case CPP_GREATER_EQ:
	  oprec = PREC_REL;
	  ocode = GE_EXPR;
	  break;
	case CPP_EQ_EQ:
	  oprec = PREC_EQ;
	  ocode = EQ_EXPR;
	  break;
	case CPP_NOT_EQ:
	  oprec = PREC_EQ;
	  ocode = NE_EXPR;
	  break;
	case CPP_AND:
	  oprec = PREC_BITAND;
	  ocode = BIT_AND_EXPR;
	  break;
	case CPP_XOR:
	  oprec = PREC_BITXOR;
	  ocode = BIT_XOR_EXPR;
	  break;
	case CPP_OR:
	  oprec = PREC_BITOR;
	  ocode = BIT_IOR_EXPR;
	  break;
	case CPP_AND_AND:
	  oprec = PREC_LOGAND;
	  ocode = TRUTH_ANDIF_EXPR;
	  break;
	case CPP_OR_OR:
	  oprec = PREC_LOGOR;
	  ocode = TRUTH_ORIF_EXPR;
	  break;
	default:
	  /* Not a binary operator, so end of the binary
	     expression.  */
	  goto out;
	}
      binary_loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      /* Reduce while the new operator does not bind tighter than the
	 one on the stack top (left-associativity).  */
      while (oprec <= stack[sp].prec)
	POP;
      switch (ocode)
	{
	case TRUTH_ANDIF_EXPR:
	  /* If the LHS is constant false, the RHS is unevaluated;
	     suppress warnings from it.  Balanced by POP above.  */
	  stack[sp].expr
	    = default_function_array_read_conversion (stack[sp].loc,
						      stack[sp].expr);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (stack[sp].loc, default_conversion (stack[sp].expr.value));
	  c_inhibit_evaluation_warnings += (stack[sp].expr.value
					    == truthvalue_false_node);
	  break;
	case TRUTH_ORIF_EXPR:
	  /* Likewise, a constant-true LHS makes the RHS unevaluated.  */
	  stack[sp].expr
	    = default_function_array_read_conversion (stack[sp].loc,
						      stack[sp].expr);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (stack[sp].loc, default_conversion (stack[sp].expr.value));
	  c_inhibit_evaluation_warnings += (stack[sp].expr.value
					    == truthvalue_true_node);
	  break;
	default:
	  break;
	}
      /* Push the RHS operand.  The duplicate assignment to
	 stack[sp].loc that used to follow the stores below was dead;
	 a single store before parsing the operand suffices.  */
      sp++;
      stack[sp].loc = binary_loc;
      stack[sp].expr = c_parser_cast_expression (parser, NULL);
      stack[sp].prec = oprec;
      stack[sp].op = ocode;
    }
 out:
  while (sp > 0)
    POP;
  return stack[0].expr;
#undef POP
}
/* Parse a cast expression (C90 6.3.4, C99 6.5.4). If AFTER is not
NULL then it is an Objective-C message expression which is the
primary-expression starting the expression as an initializer.
cast-expression:
unary-expression
( type-name ) unary-expression
*/
static struct c_expr
c_parser_cast_expression (c_parser *parser, struct c_expr *after)
{
  location_t cast_loc = c_parser_peek_token (parser)->location;
  gcc_assert (!after || c_dialect_objc ());
  /* A caller-supplied Objective-C message expression is the primary
     expression; no cast syntax can precede it, so hand it straight to
     the postfix-operator parser.  */
  if (after)
    return c_parser_postfix_expression_after_primary (parser,
						      cast_loc, *after);
  /* If the expression begins with a parenthesized type name, it may
     be either a cast or a compound literal; we need to see whether
     the next character is '{' to tell the difference.  If not, it is
     a unary expression.  Full detection of unknown typenames here
     would require a 3-token lookahead.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      struct c_type_name *type_name;
      struct c_expr ret;
      struct c_expr expr;
      c_parser_consume_token (parser);
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      /* Malformed type name: produce an error expression.  */
      if (type_name == NULL)
	{
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  ret.original_type = NULL;
	  return ret;
	}
      /* Save casted types in the function's used types hash table.  */
      used_types_insert (type_name->specs->type);
      /* '(' type-name ')' '{' starts a compound literal, not a cast.  */
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	return c_parser_postfix_expression_after_paren_type (parser, type_name,
							     cast_loc);
      {
	location_t expr_loc = c_parser_peek_token (parser)->location;
	expr = c_parser_cast_expression (parser, NULL);
	expr = default_function_array_read_conversion (expr_loc, expr);
      }
      /* A cast never carries an original code or type for warning
	 purposes.  */
      ret.value = c_cast_expr (cast_loc, type_name, expr.value);
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      return ret;
    }
  else
    return c_parser_unary_expression (parser);
}
/* Parse a unary expression (C90 6.3.3, C99 6.5.3).
unary-expression:
postfix-expression
++ unary-expression
-- unary-expression
unary-operator cast-expression
sizeof unary-expression
sizeof ( type-name )
unary-operator: one of
& * + - ~ !
GNU extensions:
unary-expression:
__alignof__ unary-expression
__alignof__ ( type-name )
&& identifier
unary-operator: one of
__extension__ __real__ __imag__
In addition, the GNU syntax treats ++ and -- as unary operators, so
they may be applied to cast expressions with errors for non-lvalues
given later. */
static struct c_expr
c_parser_unary_expression (c_parser *parser)
{
  int ext_diag;
  struct c_expr result, operand;
  location_t start_loc = c_parser_peek_token (parser)->location;
  location_t operand_loc;
  result.original_code = ERROR_MARK;
  result.original_type = NULL;
  /* Dispatch on the leading token; anything unrecognized falls back
     to a postfix expression.  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_PLUS_PLUS:
      /* Preincrement.  */
      c_parser_consume_token (parser);
      operand_loc = c_parser_peek_token (parser)->location;
      operand = c_parser_cast_expression (parser, NULL);
      operand = default_function_array_read_conversion (operand_loc, operand);
      return parser_build_unary_op (start_loc, PREINCREMENT_EXPR, operand);
    case CPP_MINUS_MINUS:
      /* Predecrement.  */
      c_parser_consume_token (parser);
      operand_loc = c_parser_peek_token (parser)->location;
      operand = c_parser_cast_expression (parser, NULL);
      operand = default_function_array_read_conversion (operand_loc, operand);
      return parser_build_unary_op (start_loc, PREDECREMENT_EXPR, operand);
    case CPP_AND:
      /* Address-of.  No function/array decay here: taking the
	 address of a function or array is exactly the case where
	 decay must not happen.  */
      c_parser_consume_token (parser);
      operand = c_parser_cast_expression (parser, NULL);
      mark_exp_read (operand.value);
      return parser_build_unary_op (start_loc, ADDR_EXPR, operand);
    case CPP_MULT:
      /* Indirection.  */
      c_parser_consume_token (parser);
      operand_loc = c_parser_peek_token (parser)->location;
      operand = c_parser_cast_expression (parser, NULL);
      operand = default_function_array_read_conversion (operand_loc, operand);
      result.value = build_indirect_ref (start_loc, operand.value,
					 RO_UNARY_STAR);
      return result;
    case CPP_PLUS:
      /* Unary plus is a GNU/C89+ feature traditional C lacked.  */
      if (!c_dialect_objc () && !in_system_header)
	warning_at (start_loc,
		    OPT_Wtraditional,
		    "traditional C rejects the unary plus operator");
      c_parser_consume_token (parser);
      operand_loc = c_parser_peek_token (parser)->location;
      operand = c_parser_cast_expression (parser, NULL);
      operand = default_function_array_read_conversion (operand_loc, operand);
      return parser_build_unary_op (start_loc, CONVERT_EXPR, operand);
    case CPP_MINUS:
      /* Negation.  */
      c_parser_consume_token (parser);
      operand_loc = c_parser_peek_token (parser)->location;
      operand = c_parser_cast_expression (parser, NULL);
      operand = default_function_array_read_conversion (operand_loc, operand);
      return parser_build_unary_op (start_loc, NEGATE_EXPR, operand);
    case CPP_COMPL:
      /* Bitwise complement.  */
      c_parser_consume_token (parser);
      operand_loc = c_parser_peek_token (parser)->location;
      operand = c_parser_cast_expression (parser, NULL);
      operand = default_function_array_read_conversion (operand_loc, operand);
      return parser_build_unary_op (start_loc, BIT_NOT_EXPR, operand);
    case CPP_NOT:
      /* Logical negation.  */
      c_parser_consume_token (parser);
      operand_loc = c_parser_peek_token (parser)->location;
      operand = c_parser_cast_expression (parser, NULL);
      operand = default_function_array_read_conversion (operand_loc, operand);
      return parser_build_unary_op (start_loc, TRUTH_NOT_EXPR, operand);
    case CPP_AND_AND:
      /* Refer to the address of a label as a pointer.  */
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  result.value = finish_label_address_expr
	    (c_parser_peek_token (parser)->value, start_loc);
	  c_parser_consume_token (parser);
	}
      else
	{
	  c_parser_error (parser, "expected identifier");
	  result.value = error_mark_node;
	}
      return result;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_SIZEOF:
	  return c_parser_sizeof_expression (parser);
	case RID_ALIGNOF:
	  return c_parser_alignof_expression (parser);
	case RID_EXTENSION:
	  /* __extension__ silences pedantic diagnostics for its
	     operand only.  */
	  c_parser_consume_token (parser);
	  ext_diag = disable_extension_diagnostics ();
	  result = c_parser_cast_expression (parser, NULL);
	  restore_extension_diagnostics (ext_diag);
	  return result;
	case RID_REALPART:
	  c_parser_consume_token (parser);
	  operand_loc = c_parser_peek_token (parser)->location;
	  operand = c_parser_cast_expression (parser, NULL);
	  operand = default_function_array_conversion (operand_loc, operand);
	  return parser_build_unary_op (start_loc, REALPART_EXPR, operand);
	case RID_IMAGPART:
	  c_parser_consume_token (parser);
	  operand_loc = c_parser_peek_token (parser)->location;
	  operand = c_parser_cast_expression (parser, NULL);
	  operand = default_function_array_conversion (operand_loc, operand);
	  return parser_build_unary_op (start_loc, IMAGPART_EXPR, operand);
	default:
	  return c_parser_postfix_expression (parser);
	}
    default:
      return c_parser_postfix_expression (parser);
    }
}
/* Parse a sizeof expression. */
static struct c_expr
c_parser_sizeof_expression (c_parser *parser)
{
  struct c_expr expr;
  location_t expr_loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_SIZEOF));
  c_parser_consume_token (parser);
  /* The operand of sizeof is not evaluated; suppress evaluation
     warnings and record that we are inside sizeof.  Each early return
     below must undo both increments exactly once.  */
  c_inhibit_evaluation_warnings++;
  in_sizeof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either sizeof ( type-name ) or sizeof unary-expression
	 starting with a compound literal.  */
      struct c_type_name *type_name;
      c_parser_consume_token (parser);
      expr_loc = c_parser_peek_token (parser)->location;
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (type_name == NULL)
	{
	  struct c_expr ret;
	  c_inhibit_evaluation_warnings--;
	  in_sizeof--;
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  ret.original_type = NULL;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* A compound literal: parse it, then join the common
	     expression path below via the cross-branch label.  */
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name,
							       expr_loc);
	  goto sizeof_expr;
	}
      /* sizeof ( type-name ).  */
      c_inhibit_evaluation_warnings--;
      in_sizeof--;
      return c_expr_sizeof_type (expr_loc, type_name);
    }
  else
    {
      expr_loc = c_parser_peek_token (parser)->location;
      expr = c_parser_unary_expression (parser);
    sizeof_expr:
      c_inhibit_evaluation_warnings--;
      in_sizeof--;
      mark_exp_read (expr.value);
      /* C forbids sizeof on a bit-field lvalue.  */
      if (TREE_CODE (expr.value) == COMPONENT_REF
	  && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1)))
	error_at (expr_loc, "%<sizeof%> applied to a bit-field");
      return c_expr_sizeof_expr (expr_loc, expr);
    }
}
/* Parse an alignof expression. */
static struct c_expr
c_parser_alignof_expression (c_parser *parser)
{
  struct c_expr expr;
  location_t loc = c_parser_peek_token (parser)->location;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNOF));
  c_parser_consume_token (parser);
  /* The operand of __alignof__ is not evaluated; each early return
     below must undo both increments exactly once.  */
  c_inhibit_evaluation_warnings++;
  in_alignof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either __alignof__ ( type-name ) or __alignof__
	 unary-expression starting with a compound literal.  */
      /* TYPE_LOC (formerly a declaration shadowing LOC above) is the
	 location of the first token after the open parenthesis.  */
      location_t type_loc;
      struct c_type_name *type_name;
      struct c_expr ret;
      c_parser_consume_token (parser);
      type_loc = c_parser_peek_token (parser)->location;
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (type_name == NULL)
	{
	  /* Malformed type name: the redundant shadowing RET
	     declaration that used to be here is gone; the outer RET
	     serves.  */
	  c_inhibit_evaluation_warnings--;
	  in_alignof--;
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  ret.original_type = NULL;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* A compound literal: join the expression path below.  */
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name,
							       type_loc);
	  goto alignof_expr;
	}
      /* alignof ( type-name ).  */
      c_inhibit_evaluation_warnings--;
      in_alignof--;
      ret.value = c_alignof (type_loc, groktypename (type_name, NULL, NULL));
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      return ret;
    }
  else
    {
      struct c_expr ret;
      expr = c_parser_unary_expression (parser);
    alignof_expr:
      mark_exp_read (expr.value);
      c_inhibit_evaluation_warnings--;
      in_alignof--;
      /* LOC here is the location of the __alignof__ keyword itself.  */
      ret.value = c_alignof_expr (loc, expr.value);
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      return ret;
    }
}
/* Parse a postfix expression (C90 6.3.1-6.3.2, C99 6.5.1-6.5.2).
postfix-expression:
primary-expression
postfix-expression [ expression ]
postfix-expression ( argument-expression-list[opt] )
postfix-expression . identifier
postfix-expression -> identifier
postfix-expression ++
postfix-expression --
( type-name ) { initializer-list }
( type-name ) { initializer-list , }
argument-expression-list:
argument-expression
argument-expression-list , argument-expression
primary-expression:
identifier
constant
string-literal
( expression )
GNU extensions:
primary-expression:
__func__
(treated as a keyword in GNU C)
__FUNCTION__
__PRETTY_FUNCTION__
( compound-statement )
__builtin_va_arg ( assignment-expression , type-name )
__builtin_offsetof ( type-name , offsetof-member-designator )
__builtin_choose_expr ( assignment-expression ,
assignment-expression ,
assignment-expression )
__builtin_types_compatible_p ( type-name , type-name )
offsetof-member-designator:
identifier
offsetof-member-designator . identifier
offsetof-member-designator [ expression ]
Objective-C:
primary-expression:
[ objc-receiver objc-message-args ]
@selector ( objc-selector-arg )
@protocol ( identifier )
@encode ( type-name )
objc-string-literal
Classname . identifier
*/
static struct c_expr
c_parser_postfix_expression (c_parser *parser)
{
  struct c_expr expr, e1, e2, e3;
  struct c_type_name *t1, *t2;
  /* Note: the stray double semicolon that used to follow this
     initializer has been removed.  */
  location_t loc = c_parser_peek_token (parser)->location;
  expr.original_code = ERROR_MARK;
  expr.original_type = NULL;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_NUMBER:
      expr.value = c_parser_peek_token (parser)->value;
      loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      if (TREE_CODE (expr.value) == FIXED_CST
	  && !targetm.fixed_point_supported_p ())
	{
	  error_at (loc, "fixed-point types not supported for this target");
	  expr.value = error_mark_node;
	}
      break;
    case CPP_CHAR:
    case CPP_CHAR16:
    case CPP_CHAR32:
    case CPP_WCHAR:
      expr.value = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      break;
    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
      expr.value = c_parser_peek_token (parser)->value;
      expr.original_code = STRING_CST;
      c_parser_consume_token (parser);
      break;
    case CPP_OBJC_STRING:
      gcc_assert (c_dialect_objc ());
      expr.value
	= objc_build_string_object (c_parser_peek_token (parser)->value);
      c_parser_consume_token (parser);
      break;
    case CPP_NAME:
      switch (c_parser_peek_token (parser)->id_kind)
	{
	case C_ID_ID:
	  {
	    tree id = c_parser_peek_token (parser)->value;
	    c_parser_consume_token (parser);
	    expr.value = build_external_ref (loc, id,
					     (c_parser_peek_token (parser)->type
					      == CPP_OPEN_PAREN),
					     &expr.original_type);
	    break;
	  }
	case C_ID_CLASSNAME:
	  {
	    /* Here we parse the Objective-C 2.0 Class.name dot
	       syntax.  */
	    tree class_name = c_parser_peek_token (parser)->value;
	    tree component;
	    c_parser_consume_token (parser);
	    gcc_assert (c_dialect_objc ());
	    if (!c_parser_require (parser, CPP_DOT, "expected %<.%>"))
	      {
		expr.value = error_mark_node;
		break;
	      }
	    if (c_parser_next_token_is_not (parser, CPP_NAME))
	      {
		c_parser_error (parser, "expected identifier");
		expr.value = error_mark_node;
		break;
	      }
	    component = c_parser_peek_token (parser)->value;
	    c_parser_consume_token (parser);
	    expr.value = objc_build_class_component_ref (class_name,
							 component);
	    break;
	  }
	default:
	  c_parser_error (parser, "expected expression");
	  expr.value = error_mark_node;
	  break;
	}
      break;
    case CPP_OPEN_PAREN:
      /* A parenthesized expression, statement expression or compound
	 literal.  */
      if (c_parser_peek_2nd_token (parser)->type == CPP_OPEN_BRACE)
	{
	  /* A statement expression.  */
	  tree stmt;
	  location_t brace_loc;
	  c_parser_consume_token (parser);
	  brace_loc = c_parser_peek_token (parser)->location;
	  c_parser_consume_token (parser);
	  if (cur_stmt_list == NULL)
	    {
	      error_at (loc, "braced-group within expression allowed "
			"only inside a function");
	      parser->error = true;
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      break;
	    }
	  stmt = c_begin_stmt_expr ();
	  c_parser_compound_statement_nostart (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  pedwarn (loc, OPT_pedantic,
		   "ISO C forbids braced-groups within expressions");
	  expr.value = c_finish_stmt_expr (brace_loc, stmt);
	  mark_exp_read (expr.value);
	}
      else if (c_token_starts_typename (c_parser_peek_2nd_token (parser)))
	{
	  /* A compound literal.  ??? Can we actually get here rather
	     than going directly to
	     c_parser_postfix_expression_after_paren_type from
	     elsewhere?  */
	  /* TYPE_LOC was formerly a declaration shadowing LOC.  */
	  location_t type_loc;
	  struct c_type_name *type_name;
	  c_parser_consume_token (parser);
	  type_loc = c_parser_peek_token (parser)->location;
	  type_name = c_parser_type_name (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  if (type_name == NULL)
	    {
	      expr.value = error_mark_node;
	    }
	  else
	    expr = c_parser_postfix_expression_after_paren_type (parser,
								 type_name,
								 type_loc);
	}
      else
	{
	  /* A parenthesized expression.  */
	  c_parser_consume_token (parser);
	  expr = c_parser_expression (parser);
	  if (TREE_CODE (expr.value) == MODIFY_EXPR)
	    TREE_NO_WARNING (expr.value) = 1;
	  if (expr.original_code != C_MAYBE_CONST_EXPR)
	    expr.original_code = ERROR_MARK;
	  /* Don't change EXPR.ORIGINAL_TYPE.  */
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	}
      break;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_FUNCTION_NAME:
	case RID_PRETTY_FUNCTION_NAME:
	case RID_C99_FUNCTION_NAME:
	  expr.value = fname_decl (loc,
				   c_parser_peek_token (parser)->keyword,
				   c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	  break;
	case RID_VA_ARG:
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  e1 = c_parser_expr_no_commas (parser, NULL);
	  mark_exp_read (e1.value);
	  e1.value = c_fully_fold (e1.value, false, NULL);
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      break;
	    }
	  loc = c_parser_peek_token (parser)->location;
	  t1 = c_parser_type_name (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	    }
	  else
	    {
	      tree type_expr = NULL_TREE;
	      expr.value = c_build_va_arg (loc, e1.value,
					   groktypename (t1, &type_expr, NULL));
	      if (type_expr)
		{
		  expr.value = build2 (C_MAYBE_CONST_EXPR,
				       TREE_TYPE (expr.value), type_expr,
				       expr.value);
		  C_MAYBE_CONST_EXPR_NON_CONST (expr.value) = true;
		}
	    }
	  break;
	case RID_OFFSETOF:
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  if (t1 == NULL)
	    parser->error = true;
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    gcc_assert (parser->error);
	  if (parser->error)
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      break;
	    }
	  {
	    tree type = groktypename (t1, NULL, NULL);
	    tree offsetof_ref;
	    if (type == error_mark_node)
	      offsetof_ref = error_mark_node;
	    else
	      {
		offsetof_ref = build1 (INDIRECT_REF, type, null_pointer_node);
		SET_EXPR_LOCATION (offsetof_ref, loc);
	      }
	    /* Parse the second argument to __builtin_offsetof.  We
	       must have one identifier, and beyond that we want to
	       accept sub structure and sub array references.  */
	    if (c_parser_next_token_is (parser, CPP_NAME))
	      {
		offsetof_ref = build_component_ref
		  (loc, offsetof_ref, c_parser_peek_token (parser)->value);
		c_parser_consume_token (parser);
		while (c_parser_next_token_is (parser, CPP_DOT)
		       || c_parser_next_token_is (parser,
						  CPP_OPEN_SQUARE)
		       || c_parser_next_token_is (parser,
						  CPP_DEREF))
		  {
		    if (c_parser_next_token_is (parser, CPP_DEREF))
		      {
			/* a->b is treated as a[0].b.  */
			loc = c_parser_peek_token (parser)->location;
			offsetof_ref = build_array_ref (loc,
							offsetof_ref,
							integer_zero_node);
			goto do_dot;
		      }
		    else if (c_parser_next_token_is (parser, CPP_DOT))
		      {
		      do_dot:
			c_parser_consume_token (parser);
			if (c_parser_next_token_is_not (parser,
							CPP_NAME))
			  {
			    c_parser_error (parser, "expected identifier");
			    break;
			  }
			offsetof_ref = build_component_ref
			  (loc, offsetof_ref,
			   c_parser_peek_token (parser)->value);
			c_parser_consume_token (parser);
		      }
		    else
		      {
			tree idx;
			loc = c_parser_peek_token (parser)->location;
			c_parser_consume_token (parser);
			idx = c_parser_expression (parser).value;
			idx = c_fully_fold (idx, false, NULL);
			c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
						   "expected %<]%>");
			offsetof_ref = build_array_ref (loc, offsetof_ref, idx);
		      }
		  }
	      }
	    else
	      c_parser_error (parser, "expected identifier");
	    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				       "expected %<)%>");
	    expr.value = fold_offsetof (offsetof_ref, NULL_TREE);
	  }
	  break;
	case RID_CHOOSE_EXPR:
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  loc = c_parser_peek_token (parser)->location;
	  e1 = c_parser_expr_no_commas (parser, NULL);
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      break;
	    }
	  e2 = c_parser_expr_no_commas (parser, NULL);
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      break;
	    }
	  e3 = c_parser_expr_no_commas (parser, NULL);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  {
	    tree c;
	    c = e1.value;
	    mark_exp_read (e2.value);
	    mark_exp_read (e3.value);
	    if (TREE_CODE (c) != INTEGER_CST
		|| !INTEGRAL_TYPE_P (TREE_TYPE (c)))
	      error_at (loc,
			"first argument to %<__builtin_choose_expr%> not"
			" a constant");
	    constant_expression_warning (c);
	    expr = integer_zerop (c) ? e3 : e2;
	  }
	  break;
	case RID_TYPES_COMPATIBLE_P:
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      break;
	    }
	  t2 = c_parser_type_name (parser);
	  if (t2 == NULL)
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  {
	    tree e1, e2;
	    e1 = TYPE_MAIN_VARIANT (groktypename (t1, NULL, NULL));
	    e2 = TYPE_MAIN_VARIANT (groktypename (t2, NULL, NULL));
	    expr.value
	      = comptypes (e1, e2) ? integer_one_node : integer_zero_node;
	  }
	  break;
	case RID_AT_SELECTOR:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  {
	    tree sel = c_parser_objc_selector_arg (parser);
	    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				       "expected %<)%>");
	    expr.value = objc_build_selector_expr (loc, sel);
	  }
	  break;
	case RID_AT_PROTOCOL:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      break;
	    }
	  {
	    tree id = c_parser_peek_token (parser)->value;
	    c_parser_consume_token (parser);
	    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				       "expected %<)%>");
	    expr.value = objc_build_protocol_expr (id);
	  }
	  break;
	case RID_AT_ENCODE:
	  /* Extension to support C-structures in the archiver.  */
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      break;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  {
	    tree type = groktypename (t1, NULL, NULL);
	    expr.value = objc_build_encode_expr (type);
	  }
	  break;
	default:
	  c_parser_error (parser, "expected expression");
	  expr.value = error_mark_node;
	  break;
	}
      break;
    case CPP_OPEN_SQUARE:
      if (c_dialect_objc ())
	{
	  tree receiver, args;
	  c_parser_consume_token (parser);
	  receiver = c_parser_objc_receiver (parser);
	  args = c_parser_objc_message_args (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  expr.value = objc_build_message_expr (build_tree_list (receiver,
								 args));
	  break;
	}
      /* Else fall through to report error.  */
    default:
      c_parser_error (parser, "expected expression");
      expr.value = error_mark_node;
      break;
    }
  /* Whatever primary we built, go on to parse any trailing postfix
     operators.  */
  return c_parser_postfix_expression_after_primary (parser, loc, expr);
}
/* Parse a postfix expression after a parenthesized type name: the
brace-enclosed initializer of a compound literal, possibly followed
by some postfix operators. This is separate because it is not
possible to tell until after the type name whether a cast
expression has a cast or a compound literal, or whether the operand
of sizeof is a parenthesized type name or starts with a compound
literal. TYPE_LOC is the location where TYPE_NAME starts--the
location of the first token after the parentheses around the type
name. */
static struct c_expr
c_parser_postfix_expression_after_paren_type (c_parser *parser,
					      struct c_type_name *type_name,
					      location_t type_loc)
{
  tree type;
  struct c_expr init;
  bool non_const;
  struct c_expr expr;
  location_t start_loc;
  tree type_expr = NULL_TREE;
  bool type_expr_const = true;
  check_compound_literal_type (type_loc, type_name);
  /* start_init/finish_init bracket the braced-initializer parse and
     must always be paired, even on the error paths below (which set
     TYPE to error_mark_node rather than returning early).  */
  start_init (NULL_TREE, NULL, 0);
  type = groktypename (type_name, &type_expr, &type_expr_const);
  start_loc = c_parser_peek_token (parser)->location;
  if (type != error_mark_node && C_TYPE_VARIABLE_SIZE (type))
    {
      error_at (type_loc, "compound literal has variable size");
      type = error_mark_node;
    }
  init = c_parser_braced_init (parser, type, false);
  finish_init ();
  maybe_warn_string_init (type, init);
  if (type != error_mark_node
      && !ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (type))
      && current_function_decl)
    {
      error ("compound literal qualified by address-space qualifier");
      type = error_mark_node;
    }
  if (!flag_isoc99)
    pedwarn (start_loc, OPT_pedantic, "ISO C90 forbids compound literals");
  /* The literal is non-constant if its initializer is, or if the type
     itself involved a non-constant expression (e.g. a VLA typedef).  */
  non_const = ((init.value && TREE_CODE (init.value) == CONSTRUCTOR)
	       ? CONSTRUCTOR_NON_CONST (init.value)
	       : init.original_code == C_MAYBE_CONST_EXPR);
  non_const |= !type_expr_const;
  expr.value = build_compound_literal (start_loc, type, init.value, non_const);
  expr.original_code = ERROR_MARK;
  expr.original_type = NULL;
  if (type_expr)
    {
      /* Attach the type's side-effect expression so it is evaluated
	 before the literal.  */
      if (TREE_CODE (expr.value) == C_MAYBE_CONST_EXPR)
	{
	  gcc_assert (C_MAYBE_CONST_EXPR_PRE (expr.value) == NULL_TREE);
	  C_MAYBE_CONST_EXPR_PRE (expr.value) = type_expr;
	}
      else
	{
	  gcc_assert (!non_const);
	  expr.value = build2 (C_MAYBE_CONST_EXPR, type,
			       type_expr, expr.value);
	}
    }
  /* A compound literal can itself be followed by postfix operators.  */
  return c_parser_postfix_expression_after_primary (parser, start_loc, expr);
}
/* Parse a postfix expression after the initial primary or compound
literal; that is, parse a series of postfix operators.
EXPR_LOC is the location of the primary expression. */
static struct c_expr
c_parser_postfix_expression_after_primary (c_parser *parser,
					   location_t expr_loc,
					   struct c_expr expr)
{
  struct c_expr orig_expr;
  tree ident, idx;
  VEC(tree,gc) *exprlist;
  VEC(tree,gc) *origtypes;
  /* Loop, absorbing postfix operators until a token that cannot
     continue the postfix expression is seen; that token ends the
     expression and EXPR is returned.  */
  while (true)
    {
      location_t op_loc = c_parser_peek_token (parser)->location;
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_OPEN_SQUARE:
	  /* Array reference.  */
	  c_parser_consume_token (parser);
	  idx = c_parser_expression (parser).value;
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  expr.value = build_array_ref (op_loc, expr.value, idx);
	  expr.original_code = ERROR_MARK;
	  expr.original_type = NULL;
	  break;
	case CPP_OPEN_PAREN:
	  /* Function call.  */
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    exprlist = NULL;
	  else
	    exprlist = c_parser_expr_list (parser, true, false, &origtypes);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  orig_expr = expr;
	  mark_exp_read (expr.value);
	  /* FIXME diagnostics: Ideally we want the FUNCNAME, not the
	     "(" after the FUNCNAME, which is what we have now.  */
	  expr.value = build_function_call_vec (op_loc, expr.value, exprlist,
						origtypes);
	  expr.original_code = ERROR_MARK;
	  /* A folded __builtin_constant_p call is marked so later
	     code can treat it as a possibly-constant expression.  */
	  if (TREE_CODE (expr.value) == INTEGER_CST
	      && TREE_CODE (orig_expr.value) == FUNCTION_DECL
	      && DECL_BUILT_IN_CLASS (orig_expr.value) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (orig_expr.value) == BUILT_IN_CONSTANT_P)
	    expr.original_code = C_MAYBE_CONST_EXPR;
	  expr.original_type = NULL;
	  /* Return the argument vectors to the GC-managed pool.  */
	  if (exprlist != NULL)
	    {
	      release_tree_vector (exprlist);
	      release_tree_vector (origtypes);
	    }
	  break;
	case CPP_DOT:
	  /* Structure element reference.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr_loc, expr);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    ident = c_parser_peek_token (parser)->value;
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      expr.original_type = NULL;
	      return expr;
	    }
	  c_parser_consume_token (parser);
	  expr.value = build_component_ref (op_loc, expr.value, ident);
	  expr.original_code = ERROR_MARK;
	  if (TREE_CODE (expr.value) != COMPONENT_REF)
	    expr.original_type = NULL;
	  else
	    {
	      /* Remember the original type of a bitfield.  */
	      tree field = TREE_OPERAND (expr.value, 1);
	      if (TREE_CODE (field) != FIELD_DECL)
		expr.original_type = NULL;
	      else
		expr.original_type = DECL_BIT_FIELD_TYPE (field);
	    }
	  break;
	case CPP_DEREF:
	  /* Structure element reference.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr_loc, expr);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    ident = c_parser_peek_token (parser)->value;
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      expr.original_type = NULL;
	      return expr;
	    }
	  c_parser_consume_token (parser);
	  /* a->b is built as (*a).b.  */
	  expr.value = build_component_ref (op_loc,
					    build_indirect_ref (op_loc,
								expr.value,
								RO_ARROW),
					    ident);
	  expr.original_code = ERROR_MARK;
	  if (TREE_CODE (expr.value) != COMPONENT_REF)
	    expr.original_type = NULL;
	  else
	    {
	      /* Remember the original type of a bitfield.  */
	      tree field = TREE_OPERAND (expr.value, 1);
	      if (TREE_CODE (field) != FIELD_DECL)
		expr.original_type = NULL;
	      else
		expr.original_type = DECL_BIT_FIELD_TYPE (field);
	    }
	  break;
	case CPP_PLUS_PLUS:
	  /* Postincrement.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_read_conversion (expr_loc, expr);
	  expr.value = build_unary_op (op_loc,
				       POSTINCREMENT_EXPR, expr.value, 0);
	  expr.original_code = ERROR_MARK;
	  expr.original_type = NULL;
	  break;
	case CPP_MINUS_MINUS:
	  /* Postdecrement.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_read_conversion (expr_loc, expr);
	  expr.value = build_unary_op (op_loc,
				       POSTDECREMENT_EXPR, expr.value, 0);
	  expr.original_code = ERROR_MARK;
	  expr.original_type = NULL;
	  break;
	default:
	  return expr;
	}
    }
}
/* Parse an expression (C90 6.3.17, C99 6.5.17).
expression:
assignment-expression
expression , assignment-expression
*/
static struct c_expr
c_parser_expression (c_parser *parser)
{
  struct c_expr expr;
  /* Parse the first assignment-expression, then fold any following
     comma operators left-to-right into COMPOUND_EXPRs.  */
  expr = c_parser_expr_no_commas (parser, NULL);
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      struct c_expr next;
      tree lhsval;
      /* LOC is the location of the comma token itself; EXPR_LOC is the
	 location of the right-hand operand that follows it.  */
      location_t loc = c_parser_peek_token (parser)->location;
      location_t expr_loc;
      c_parser_consume_token (parser);
      expr_loc = c_parser_peek_token (parser)->location;
      /* The left operand of a comma is evaluated only for its side
	 effects; look through nested COMPOUND_EXPRs to its final value
	 and mark that value as read so it does not get diagnosed as
	 set but unused.  */
      lhsval = expr.value;
      while (TREE_CODE (lhsval) == COMPOUND_EXPR)
	lhsval = TREE_OPERAND (lhsval, 1);
      if (DECL_P (lhsval) || handled_component_p (lhsval))
	mark_exp_read (lhsval);
      next = c_parser_expr_no_commas (parser, NULL);
      next = default_function_array_conversion (expr_loc, next);
      expr.value = build_compound_expr (loc, expr.value, next.value);
      expr.original_code = COMPOUND_EXPR;
      /* The comma expression as a whole carries the original type of
	 its last operand.  */
      expr.original_type = next.original_type;
    }
  return expr;
}
/* Parse an expression and convert functions or arrays to
pointers. */
static struct c_expr
c_parser_expression_conv (c_parser *parser)
{
  struct c_expr result;
  location_t start_loc;

  /* Record the location of the expression's first token before
     parsing, so the conversion diagnostics point at its start.  */
  start_loc = c_parser_peek_token (parser)->location;
  result = c_parser_expression (parser);
  /* Decay functions and arrays to pointers, as for rvalue use.  */
  return default_function_array_conversion (start_loc, result);
}
/* Parse a non-empty list of expressions. If CONVERT_P, convert
functions and arrays to pointers. If FOLD_P, fold the expressions.
nonempty-expr-list:
assignment-expression
nonempty-expr-list , assignment-expression
*/
static VEC(tree,gc) *
c_parser_expr_list (c_parser *parser, bool convert_p, bool fold_p,
		    VEC(tree,gc) **p_orig_types)
{
  VEC(tree,gc) *ret;
  VEC(tree,gc) *orig_types;
  struct c_expr expr;
  location_t loc = c_parser_peek_token (parser)->location;
  ret = make_tree_vector ();
  /* Only collect the pre-conversion types when the caller asked for
     them.  */
  if (p_orig_types == NULL)
    orig_types = NULL;
  else
    orig_types = make_tree_vector ();
  /* The list is nonempty, so parse the first expression
     unconditionally.  */
  expr = c_parser_expr_no_commas (parser, NULL);
  if (convert_p)
    expr = default_function_array_read_conversion (loc, expr);
  if (fold_p)
    expr.value = c_fully_fold (expr.value, false, NULL);
  /* NOTE(review): the first element uses VEC_quick_push while the loop
     below uses VEC_safe_push — presumably make_tree_vector reserves at
     least one slot; confirm against its definition.  */
  VEC_quick_push (tree, ret, expr.value);
  if (orig_types != NULL)
    VEC_quick_push (tree, orig_types, expr.original_type);
  /* Parse any remaining comma-separated expressions.  */
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      c_parser_consume_token (parser);
      loc = c_parser_peek_token (parser)->location;
      expr = c_parser_expr_no_commas (parser, NULL);
      if (convert_p)
	expr = default_function_array_read_conversion (loc, expr);
      if (fold_p)
	expr.value = c_fully_fold (expr.value, false, NULL);
      VEC_safe_push (tree, gc, ret, expr.value);
      if (orig_types != NULL)
	VEC_safe_push (tree, gc, orig_types, expr.original_type);
    }
  if (orig_types != NULL)
    *p_orig_types = orig_types;
  return ret;
}
/* Parse Objective-C-specific constructs. */
/* Parse an objc-class-definition.
objc-class-definition:
@interface identifier objc-superclass[opt] objc-protocol-refs[opt]
objc-class-instance-variables[opt] objc-methodprotolist @end
@implementation identifier objc-superclass[opt]
objc-class-instance-variables[opt]
@interface identifier ( identifier ) objc-protocol-refs[opt]
objc-methodprotolist @end
@interface identifier ( ) objc-protocol-refs[opt]
objc-methodprotolist @end
@implementation identifier ( identifier )
objc-superclass:
: identifier
"@interface identifier (" must start "@interface identifier (
identifier ) ...": objc-methodprotolist in the first production may
not start with a parenthesized identifier as a declarator of a data
definition with no declaration specifiers if the objc-superclass,
objc-protocol-refs and objc-class-instance-variables are omitted. */
static void
c_parser_objc_class_definition (c_parser *parser, tree attributes)
{
  /* IFACE_P is true for @interface, false for @implementation.  */
  bool iface_p;
  tree id1;
  tree superclass;
  if (c_parser_next_token_is_keyword (parser, RID_AT_INTERFACE))
    iface_p = true;
  else if (c_parser_next_token_is_keyword (parser, RID_AT_IMPLEMENTATION))
    iface_p = false;
  else
    gcc_unreachable ();
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      return;
    }
  /* ID1 is the class name.  */
  id1 = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* We have a category or class extension. */
      tree id2;
      tree proto = NULL_TREE;
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  if (iface_p && c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    {
	      /* We have a class extension. */
	      /* "@interface Foo ()" — the empty parentheses mark a
		 class extension; ID2 stays NULL_TREE to signal it.  */
	      id2 = NULL_TREE;
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier or %<)%>");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      return;
	    }
	}
      else
	{
	  /* ID2 is the category name.  */
	  id2 = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	}
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      /* A category @implementation has no protocol list or method
	 prototype list here; we are done once it is started.  */
      if (!iface_p)
	{
	  objc_start_category_implementation (id1, id2);
	  return;
	}
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      objc_start_category_interface (id1, id2, proto, attributes);
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_finish_interface ();
      return;
    }
  /* Plain class: parse the optional ": superclass".  */
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  return;
	}
      superclass = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else
    superclass = NULL_TREE;
  if (iface_p)
    {
      tree proto = NULL_TREE;
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      objc_start_class_interface (id1, superclass, proto, attributes);
    }
  else
    objc_start_class_implementation (id1, superclass);
  /* Optional instance-variable block, valid for both @interface and
     @implementation.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    c_parser_objc_class_instance_variables (parser);
  if (iface_p)
    {
      objc_continue_interface ();
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_finish_interface ();
    }
  else
    {
      /* The @end of an @implementation is handled elsewhere; stop
	 here after continuing the implementation context.  */
      objc_continue_implementation ();
      return;
    }
}
/* Parse objc-class-instance-variables.
objc-class-instance-variables:
{ objc-instance-variable-decl-list[opt] }
objc-instance-variable-decl-list:
objc-visibility-spec
objc-instance-variable-decl ;
;
objc-instance-variable-decl-list objc-visibility-spec
objc-instance-variable-decl-list objc-instance-variable-decl ;
objc-instance-variable-decl-list ;
objc-visibility-spec:
@private
@protected
@public
objc-instance-variable-decl:
struct-declaration
*/
static void
c_parser_objc_class_instance_variables (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE));
  c_parser_consume_token (parser);
  /* Loop over visibility specifiers, pragmas and declarations until
     the closing brace (or EOF, for error recovery).  */
  while (c_parser_next_token_is_not (parser, CPP_EOF))
    {
      tree decls;
      /* Parse any stray semicolon. */
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
		   "extra semicolon in struct or union specified");
	  c_parser_consume_token (parser);
	  continue;
	}
      /* Stop if at the end of the instance variables. */
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	{
	  c_parser_consume_token (parser);
	  break;
	}
      /* Parse any objc-visibility-spec. */
      if (c_parser_next_token_is_keyword (parser, RID_AT_PRIVATE))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PRIVATE);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PROTECTED))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PROTECTED);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PUBLIC))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PUBLIC);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PACKAGE))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PACKAGE);
	  continue;
	}
      else if (c_parser_next_token_is (parser, CPP_PRAGMA))
	{
	  c_parser_pragma (parser, pragma_external);
	  continue;
	}
      /* Parse some comma-separated declarations. */
      decls = c_parser_struct_declaration (parser);
      {
	/* Comma-separated instance variables are chained together in
	   reverse order; add them one by one. */
	tree ivar = nreverse (decls);
	for (; ivar; ivar = DECL_CHAIN (ivar))
	  objc_add_instance_variable (copy_node (ivar));
      }
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
    }
}
/* Parse an objc-class-declaration.
objc-class-declaration:
@class identifier-list ;
*/
static void
c_parser_objc_class_declaration (c_parser *parser)
{
  /* Accumulated TREE_LIST of the forward-declared class names.  */
  tree names = NULL_TREE;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_CLASS));
  c_parser_consume_token (parser);
  /* Collect a comma-separated list of identifiers.  Identifiers
     already declared as type names are acceptable here too.  */
  for (;;)
    {
      tree name;

      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	  /* Recovery is complete; clear the error flag so parsing
	     continues normally after the semicolon.  */
	  parser->error = false;
	  return;
	}
      name = c_parser_peek_token (parser)->value;
      names = chainon (names, build_tree_list (NULL_TREE, name));
      c_parser_consume_token (parser);
      if (!c_parser_next_token_is (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_declare_class (names);
}
/* Parse an objc-alias-declaration.
objc-alias-declaration:
@compatibility_alias identifier identifier ;
*/
static void
c_parser_objc_alias_declaration (c_parser *parser)
{
  /* The two identifiers of "@compatibility_alias id1 id2;", parsed
     with identical error recovery, so collect them in a loop.  */
  tree ids[2];
  int i;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_ALIAS));
  c_parser_consume_token (parser);
  for (i = 0; i < 2; i++)
    {
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	  return;
	}
      ids[i] = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_declare_alias (ids[0], ids[1]);
}
/* Parse an objc-protocol-definition.
objc-protocol-definition:
@protocol identifier objc-protocol-refs[opt] objc-methodprotolist @end
@protocol identifier-list ;
"@protocol identifier ;" should be resolved as "@protocol
identifier-list ;": objc-methodprotolist may not start with a
semicolon in the first alternative if objc-protocol-refs are
omitted. */
static void
c_parser_objc_protocol_definition (c_parser *parser, tree attributes)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROTOCOL));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      return;
    }
  /* Two tokens of lookahead distinguish a forward declaration
     ("@protocol A, B;" or "@protocol A;") from a full protocol
     definition.  */
  if (c_parser_peek_2nd_token (parser)->type == CPP_COMMA
      || c_parser_peek_2nd_token (parser)->type == CPP_SEMICOLON)
    {
      tree list = NULL_TREE;
      /* Any identifiers, including those declared as type names, are
	 OK here. */
      while (true)
	{
	  tree id;
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      break;
	    }
	  id = c_parser_peek_token (parser)->value;
	  list = chainon (list, build_tree_list (NULL_TREE, id));
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	}
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      objc_declare_protocols (list, attributes);
    }
  else
    {
      /* Full definition: name, optional protocol references, the
	 method prototype list and @end.  */
      tree id = c_parser_peek_token (parser)->value;
      tree proto = NULL_TREE;
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      /* Protocol qualifiers (in/out/...) are recognized inside the
	 definition body.  */
      parser->objc_pq_context = true;
      objc_start_protocol (id, proto, attributes);
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      parser->objc_pq_context = false;
      objc_finish_interface ();
    }
}
/* Parse an objc-method-type.
objc-method-type:
+
-
Return true if it is a class method (+) and false if it is
an instance method (-).
*/
static inline bool
c_parser_objc_method_type (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);

  /* '+' marks a class method, '-' an instance method; the caller has
     already guaranteed one of the two is present.  */
  if (token->type == CPP_PLUS)
    {
      c_parser_consume_token (parser);
      return true;
    }
  if (token->type == CPP_MINUS)
    {
      c_parser_consume_token (parser);
      return false;
    }
  gcc_unreachable ();
}
/* Parse an objc-method-definition.
objc-method-definition:
objc-method-type objc-method-decl ;[opt] compound-statement
*/
static void
c_parser_objc_method_definition (c_parser *parser)
{
  bool is_class_method = c_parser_objc_method_type (parser);
  tree decl, attributes = NULL_TREE;
  /* Protocol qualifiers (in/out/...) are valid while parsing the
     method declaration.  */
  parser->objc_pq_context = true;
  decl = c_parser_objc_method_decl (parser, is_class_method, &attributes);
  if (decl == error_mark_node)
    return; /* Bail here. */
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      /* Capture the semicolon's own location before consuming it, so
	 the diagnostic points at the stray semicolon rather than at
	 whatever token follows it.  */
      location_t semicolon_loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      pedwarn (semicolon_loc, OPT_pedantic,
	       "extra semicolon in method definition specified");
    }
  if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      c_parser_error (parser, "expected %<{%>");
      return;
    }
  parser->objc_pq_context = false;
  if (objc_start_method_definition (is_class_method, decl, attributes))
    {
      add_stmt (c_parser_compound_statement (parser));
      objc_finish_method_definition (current_function_decl);
    }
  else
    {
      /* This code is executed when we find a method definition
	 outside of an @implementation context (or invalid for other
	 reasons). Parse the method (to keep going) but do not emit
	 any code.
      */
      c_parser_compound_statement (parser);
    }
}
/* Parse an objc-methodprotolist.
objc-methodprotolist:
empty
objc-methodprotolist objc-methodproto
objc-methodprotolist declaration
objc-methodprotolist ;
@optional
@required
The declaration is a data definition, which may be missing
declaration specifiers under the same rules and diagnostics as
other data definitions outside functions, and the stray semicolon
is diagnosed the same way as a stray semicolon outside a
function. */
static void
c_parser_objc_methodprotolist (c_parser *parser)
{
  while (true)
    {
      /* The list is terminated by @end. */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_SEMICOLON:
	  /* A stray semicolon, diagnosed like one outside a
	     function.  */
	  pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
		   "ISO C does not allow extra %<;%> outside of a function");
	  c_parser_consume_token (parser);
	  break;
	case CPP_PLUS:
	case CPP_MINUS:
	  /* '+' or '-' introduces a method prototype.  */
	  c_parser_objc_methodproto (parser);
	  break;
	case CPP_PRAGMA:
	  c_parser_pragma (parser, pragma_external);
	  break;
	case CPP_EOF:
	  return;
	default:
	  if (c_parser_next_token_is_keyword (parser, RID_AT_END))
	    return;
	  else if (c_parser_next_token_is_keyword (parser, RID_AT_PROPERTY))
	    c_parser_objc_at_property_declaration (parser);
	  else if (c_parser_next_token_is_keyword (parser, RID_AT_OPTIONAL))
	    {
	      /* Subsequent method declarations are optional.  */
	      objc_set_method_opt (true);
	      c_parser_consume_token (parser);
	    }
	  else if (c_parser_next_token_is_keyword (parser, RID_AT_REQUIRED))
	    {
	      /* Subsequent method declarations are required again.  */
	      objc_set_method_opt (false);
	      c_parser_consume_token (parser);
	    }
	  else
	    /* Anything else is a data definition/declaration.  */
	    c_parser_declaration_or_fndef (parser, false, false, true,
					   false, true, NULL);
	  break;
	}
    }
}
/* Parse an objc-methodproto.
objc-methodproto:
objc-method-type objc-method-decl ;
*/
static void
c_parser_objc_methodproto (c_parser *parser)
{
  bool is_class_method = c_parser_objc_method_type (parser);
  tree decl, attributes = NULL_TREE;
  /* Remember protocol qualifiers in prototypes. */
  parser->objc_pq_context = true;
  decl = c_parser_objc_method_decl (parser, is_class_method, &attributes);
  /* Forget protocol qualifiers now. */
  parser->objc_pq_context = false;
  /* Do not allow the presence of attributes to hide an erroneous
     method implementation in the interface section. */
  if (!c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      c_parser_error (parser, "expected %<;%>");
      return;
    }
  /* Only record the declaration when the decl itself parsed
     cleanly.  */
  if (decl != error_mark_node)
    objc_add_method_declaration (is_class_method, decl, attributes);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* If we are at a position that method attributes may be present, check that
there are not any parsed already (a syntax error) and then collect any
specified at the current location. Finally, if new attributes were present,
check that the next token is legal ( ';' for decls and '{' for defs). */
static bool
c_parser_objc_maybe_method_attributes (c_parser* parser, tree* attributes)
{
  bool earlier_error = false;

  /* Attributes parsed at an earlier position are a syntax error:
     discard them and remember that we complained.  */
  if (*attributes != NULL_TREE)
    {
      c_parser_error (parser,
		      "method attributes must be specified at the end only");
      *attributes = NULL_TREE;
      earlier_error = true;
    }
  /* Collect any attributes present at the current position.  */
  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    *attributes = c_parser_attributes (parser);
  /* New attributes are only legal when immediately followed by ';'
     (declaration) or '{' (definition); otherwise they sit in the
     middle of the method and get their own diagnostic.  If nothing
     was parsed here, or we already complained above, just propagate
     the earlier state.  */
  if (*attributes != NULL_TREE
      && !earlier_error
      && !c_parser_next_token_is (parser, CPP_SEMICOLON)
      && !c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      c_parser_error (parser,
		      "expected %<;%> or %<{%> after method attribute definition");
      return true;
    }
  return earlier_error;
}
/* Parse an objc-method-decl.
objc-method-decl:
( objc-type-name ) objc-selector
objc-selector
( objc-type-name ) objc-keyword-selector objc-optparmlist
objc-keyword-selector objc-optparmlist
attributes
objc-keyword-selector:
objc-keyword-decl
objc-keyword-selector objc-keyword-decl
objc-keyword-decl:
objc-selector : ( objc-type-name ) identifier
objc-selector : identifier
: ( objc-type-name ) identifier
: identifier
objc-optparmlist:
objc-optparms objc-optellipsis
objc-optparms:
empty
objc-opt-parms , parameter-declaration
objc-optellipsis:
empty
, ...
*/
static tree
c_parser_objc_method_decl (c_parser *parser, bool is_class_method, tree *attributes)
{
  /* TYPE is the optional parenthesized return type; SEL the selector
     (an identifier, or a TREE_LIST of keyword decls); PARMS any extra
     C-style parameters after the keyword arguments.  */
  tree type = NULL_TREE;
  tree sel;
  tree parms = NULL_TREE;
  bool ellipsis = false;
  bool attr_err = false;
  *attributes = NULL_TREE;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser);
      type = c_parser_objc_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  sel = c_parser_objc_selector (parser);
  /* If there is no selector, or a colon follows, we have an
     objc-keyword-selector. If there is a selector, and a colon does
     not follow, that selector ends the objc-method-decl. */
  if (!sel || c_parser_next_token_is (parser, CPP_COLON))
    {
      tree tsel = sel;
      tree list = NULL_TREE;
      /* Parse "selector : (type) name" keyword declarations until the
	 next token is neither a selector nor a colon.  */
      while (true)
	{
	  tree atype = NULL_TREE, id, keyworddecl;
	  tree param_attr = NULL_TREE;
	  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	    break;
	  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
	    {
	      c_parser_consume_token (parser);
	      atype = c_parser_objc_type_name (parser);
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	    }
	  /* New ObjC allows attributes on method parameters. */
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    param_attr = c_parser_attributes (parser);
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      return error_mark_node;
	    }
	  id = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  keyworddecl = objc_build_keyword_decl (tsel, atype, id, param_attr);
	  list = chainon (list, keyworddecl);
	  tsel = c_parser_objc_selector (parser);
	  if (!tsel && c_parser_next_token_is_not (parser, CPP_COLON))
	    break;
	}
      attr_err |= c_parser_objc_maybe_method_attributes (parser, attributes) ;
      /* Parse the optional parameter list.  Optional Objective-C
	 method parameters follow the C syntax, and may include '...'
	 to denote a variable number of arguments. */
      parms = make_node (TREE_LIST);
      while (c_parser_next_token_is (parser, CPP_COMMA))
	{
	  struct c_parm *parm;
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	    {
	      ellipsis = true;
	      c_parser_consume_token (parser);
	      /* Attributes may also follow the ellipsis.  */
	      attr_err |= c_parser_objc_maybe_method_attributes
		(parser, attributes) ;
	      break;
	    }
	  parm = c_parser_parameter_declaration (parser, NULL_TREE);
	  if (parm == NULL)
	    break;
	  parms = chainon (parms,
			   build_tree_list (NULL_TREE, grokparm (parm)));
	}
      sel = list;
    }
  else
    /* Unary selector — only the trailing attributes remain.  */
    attr_err |= c_parser_objc_maybe_method_attributes (parser, attributes) ;
  if (sel == NULL)
    {
      c_parser_error (parser, "objective-c method declaration is expected");
      return error_mark_node;
    }
  /* Misplaced attributes invalidate the whole declaration.  */
  if (attr_err)
    return error_mark_node;
  return objc_build_method_signature (is_class_method, type, sel, parms, ellipsis);
}
/* Parse an objc-type-name.
objc-type-name:
objc-type-qualifiers[opt] type-name
objc-type-qualifiers[opt]
objc-type-qualifiers:
objc-type-qualifier
objc-type-qualifiers objc-type-qualifier
objc-type-qualifier: one of
in out inout bycopy byref oneway
*/
static tree
c_parser_objc_type_name (c_parser *parser)
{
  tree quals = NULL_TREE;
  struct c_type_name *type_name = NULL;
  tree type = NULL_TREE;
  /* Collect leading protocol qualifiers.  */
  while (true)
    {
      c_token *token = c_parser_peek_token (parser);
      if (token->type == CPP_KEYWORD
	  && (token->keyword == RID_IN
	      || token->keyword == RID_OUT
	      || token->keyword == RID_INOUT
	      || token->keyword == RID_BYCOPY
	      || token->keyword == RID_BYREF
	      || token->keyword == RID_ONEWAY))
	{
	  /* Each qualifier is prepended, so QUALS ends up in reverse
	     source order.  */
	  quals = chainon (build_tree_list (NULL_TREE, token->value), quals);
	  c_parser_consume_token (parser);
	}
      else
	break;
    }
  /* The type itself is optional.  */
  if (c_parser_next_tokens_start_typename (parser, cla_prefer_type))
    type_name = c_parser_type_name (parser);
  if (type_name)
    type = groktypename (type_name, NULL, NULL);
  /* If the type is unknown, and error has already been produced and
     we need to recover from the error. In that case, use NULL_TREE
     for the type, as if no type had been specified; this will use the
     default type ('id') which is good for error recovery. */
  if (type == error_mark_node)
    type = NULL_TREE;
  /* Return the qualifiers as the purpose and the type as the value of
     a single TREE_LIST node.  */
  return build_tree_list (quals, type);
}
/* Parse objc-protocol-refs.
objc-protocol-refs:
< identifier-list >
*/
static tree
c_parser_objc_protocol_refs (c_parser *parser)
{
  /* TREE_LIST of the protocol names between '<' and '>'.  */
  tree refs = NULL_TREE;

  gcc_assert (c_parser_next_token_is (parser, CPP_LESS));
  c_parser_consume_token (parser);
  /* Collect the comma-separated names; identifiers already declared
     as type names are also valid here.  */
  for (;;)
    {
      tree name;

      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  break;
	}
      name = c_parser_peek_token (parser)->value;
      refs = chainon (refs, build_tree_list (NULL_TREE, name));
      c_parser_consume_token (parser);
      if (!c_parser_next_token_is (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  c_parser_require (parser, CPP_GREATER, "expected %<>%>");
  return refs;
}
/* Parse an objc-try-catch-finally-statement.
objc-try-catch-finally-statement:
@try compound-statement objc-catch-list[opt]
@try compound-statement objc-catch-list[opt] @finally compound-statement
objc-catch-list:
@catch ( objc-catch-parameter-declaration ) compound-statement
objc-catch-list @catch ( objc-catch-parameter-declaration ) compound-statement
objc-catch-parameter-declaration:
parameter-declaration
'...'
where '...' is to be interpreted literally, that is, it means CPP_ELLIPSIS.
PS: This function is identical to cp_parser_objc_try_catch_finally_statement
for C++. Keep them in sync. */
static void
c_parser_objc_try_catch_finally_statement (c_parser *parser)
{
  location_t location;
  tree stmt;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_TRY));
  c_parser_consume_token (parser);
  location = c_parser_peek_token (parser)->location;
  objc_maybe_warn_exceptions (location);
  stmt = c_parser_compound_statement (parser);
  objc_begin_try_stmt (location, stmt);
  while (c_parser_next_token_is_keyword (parser, RID_AT_CATCH))
    {
      struct c_parm *parm;
      tree parameter_declaration = error_mark_node;
      bool seen_open_paren = false;
      c_parser_consume_token (parser);
      /* Set SEEN_OPEN_PAREN only when the opening parenthesis really
	 was present.  The previous code set it when c_parser_require
	 *failed*, which contradicted the variable's name and the
	 recovery comments below, and silently swallowed a missing
	 %<)%> after a real %<(%>.  */
      if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	seen_open_paren = true;
      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	{
	  /* We have "@catch (...)" (where the '...' are literally
	     what is in the code).  Skip the '...'.
	     parameter_declaration is set to NULL_TREE, and
	     objc_begin_catch_clause() knows that that means
	     '...'.  */
	  c_parser_consume_token (parser);
	  parameter_declaration = NULL_TREE;
	}
      else
	{
	  /* We have "@catch (NSException *exception)" or something
	     like that.  Parse the parameter declaration.  */
	  parm = c_parser_parameter_declaration (parser, NULL_TREE);
	  if (parm == NULL)
	    parameter_declaration = error_mark_node;
	  else
	    parameter_declaration = grokparm (parm);
	}
      if (seen_open_paren)
	c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      else
	{
	  /* If there was no open parenthesis, we are recovering from
	     an error, and we are trying to figure out what mistake
	     the user has made.  */
	  /* If there is an immediate closing parenthesis, the user
	     probably forgot the opening one (ie, they typed "@catch
	     NSException *e)".  Parse the closing parenthesis and keep
	     going.  */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    c_parser_consume_token (parser);
	  /* If there is no immediate closing parenthesis, the user
	     probably doesn't know that parenthesis are required at
	     all (ie, they typed "@catch NSException *e").  So, just
	     forget about the closing parenthesis and keep going.  */
	}
      objc_begin_catch_clause (parameter_declaration);
      if (c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
	c_parser_compound_statement_nostart (parser);
      objc_finish_catch_clause ();
    }
  if (c_parser_next_token_is_keyword (parser, RID_AT_FINALLY))
    {
      c_parser_consume_token (parser);
      location = c_parser_peek_token (parser)->location;
      stmt = c_parser_compound_statement (parser);
      objc_build_finally_clause (location, stmt);
    }
  objc_finish_try_stmt ();
}
/* Parse an objc-synchronized-statement.
objc-synchronized-statement:
@synchronized ( expression ) compound-statement
*/
static void
c_parser_objc_synchronized_statement (c_parser *parser)
{
  location_t loc;
  tree expr, stmt;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNCHRONIZED));
  c_parser_consume_token (parser);
  loc = c_parser_peek_token (parser)->location;
  objc_maybe_warn_exceptions (loc);
  /* Parse the parenthesized lock-object expression; use
     error_mark_node if the opening parenthesis is missing.  */
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      expr = c_parser_expression (parser).value;
      expr = c_fully_fold (expr, false, NULL);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  else
    expr = error_mark_node;
  /* The protected body is always parsed, even after an error in the
     expression.  */
  stmt = c_parser_compound_statement (parser);
  objc_build_synchronized (loc, expr, stmt);
}
/* Parse an objc-selector; return NULL_TREE without an error if the
next token is not an objc-selector.
objc-selector:
identifier
one of
enum struct union if else while do for switch case default
break continue return goto asm sizeof typeof __alignof
unsigned long const short volatile signed restrict _Complex
in out inout bycopy byref oneway int char float double void _Bool
??? Why this selection of keywords but not, for example, storage
class specifiers? */
static tree
c_parser_objc_selector (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);
  tree value = token->value;
  /* A plain identifier is always a valid selector.  */
  if (token->type == CPP_NAME)
    {
      c_parser_consume_token (parser);
      return value;
    }
  if (token->type != CPP_KEYWORD)
    return NULL_TREE;
  /* Certain keywords are also accepted as selector names; anything
     else yields NULL_TREE without consuming the token or reporting an
     error.  */
  switch (token->keyword)
    {
    case RID_ENUM:
    case RID_STRUCT:
    case RID_UNION:
    case RID_IF:
    case RID_ELSE:
    case RID_WHILE:
    case RID_DO:
    case RID_FOR:
    case RID_SWITCH:
    case RID_CASE:
    case RID_DEFAULT:
    case RID_BREAK:
    case RID_CONTINUE:
    case RID_RETURN:
    case RID_GOTO:
    case RID_ASM:
    case RID_SIZEOF:
    case RID_TYPEOF:
    case RID_ALIGNOF:
    case RID_UNSIGNED:
    case RID_LONG:
    case RID_INT128:
    case RID_CONST:
    case RID_SHORT:
    case RID_VOLATILE:
    case RID_SIGNED:
    case RID_RESTRICT:
    case RID_COMPLEX:
    case RID_IN:
    case RID_OUT:
    case RID_INOUT:
    case RID_BYCOPY:
    case RID_BYREF:
    case RID_ONEWAY:
    case RID_INT:
    case RID_CHAR:
    case RID_FLOAT:
    case RID_DOUBLE:
    case RID_VOID:
    case RID_BOOL:
      c_parser_consume_token (parser);
      return value;
    default:
      return NULL_TREE;
    }
}
/* Parse an objc-selector-arg.
objc-selector-arg:
objc-selector
objc-keywordname-list
objc-keywordname-list:
objc-keywordname
objc-keywordname-list objc-keywordname
objc-keywordname:
objc-selector :
:
*/
static tree
c_parser_objc_selector_arg (c_parser *parser)
{
  tree sel = c_parser_objc_selector (parser);
  tree keywords = NULL_TREE;

  /* A selector not followed by a colon is a complete argument by
     itself.  */
  if (sel != NULL_TREE && c_parser_next_token_is_not (parser, CPP_COLON))
    return sel;
  /* Otherwise build a TREE_LIST of "selector :" keyword names, where
     the selector part of each keyword may be empty.  */
  for (;;)
    {
      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	return keywords;
      keywords = chainon (keywords, build_tree_list (sel, NULL_TREE));
      sel = c_parser_objc_selector (parser);
      if (sel == NULL_TREE && c_parser_next_token_is_not (parser, CPP_COLON))
	return keywords;
    }
}
/* Parse an objc-receiver.
objc-receiver:
expression
class-name
type-name
*/
static tree
c_parser_objc_receiver (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);

  /* An identifier known as a type or class name denotes the class
     itself as the receiver.  */
  if (token->type == CPP_NAME
      && (token->id_kind == C_ID_TYPENAME
	  || token->id_kind == C_ID_CLASSNAME))
    {
      tree id = token->value;
      c_parser_consume_token (parser);
      return objc_get_class_reference (id);
    }
  /* Otherwise the receiver is an ordinary expression, fully folded
     for use in the message send.  */
  return c_fully_fold (c_parser_expression (parser).value, false, NULL);
}
/* Parse objc-message-args.
objc-message-args:
objc-selector
objc-keywordarg-list
objc-keywordarg-list:
objc-keywordarg
objc-keywordarg-list objc-keywordarg
objc-keywordarg:
objc-selector : objc-keywordexpr
: objc-keywordexpr
*/
static tree
c_parser_objc_message_args (c_parser *parser)
{
  tree sel = c_parser_objc_selector (parser);
  tree args = NULL_TREE;

  /* A selector with no colon after it is the whole message.  */
  if (sel != NULL_TREE && c_parser_next_token_is_not (parser, CPP_COLON))
    return sel;
  /* Otherwise accumulate "selector : expr" keyword arguments, where
     the selector part of each keyword may be empty.  */
  for (;;)
    {
      tree arg;

      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	return error_mark_node;
      arg = c_parser_objc_keywordexpr (parser);
      args = chainon (args, build_tree_list (sel, arg));
      sel = c_parser_objc_selector (parser);
      if (sel == NULL_TREE && c_parser_next_token_is_not (parser, CPP_COLON))
	return args;
    }
}
/* Parse an objc-keywordexpr.
objc-keywordexpr:
nonempty-expr-list
*/
static tree
c_parser_objc_keywordexpr (c_parser *parser)
{
  /* Parse the (nonempty) expression list, with function/array
     conversion and folding enabled.  */
  VEC(tree,gc) *expr_list = c_parser_expr_list (parser, true, true, NULL);
  tree ret;

  if (VEC_length (tree, expr_list) == 1)
    /* A single expression: strip the list wrapper and return it
       directly.  */
    ret = VEC_index (tree, expr_list, 0);
  else
    /* A comma expression: keep it as a TREE_LIST, to be collapsed
       later.  */
    ret = build_tree_list_vec (expr_list);
  release_tree_vector (expr_list);
  return ret;
}
/* A check, needed in several places, that ObjC interface, implementation or
method definitions are not prefixed by incorrect items. */
static bool
c_parser_objc_diagnose_bad_element_prefix (c_parser *parser,
					   struct c_declspecs *specs)
{
  /* Only a bare run of declaration specifiers that carries neither a
     type nor anything beyond storage classes is acceptable here.  */
  if (specs->declspecs_seen_p
      && !specs->non_sc_seen_p
      && specs->typespec_kind == ctsk_none)
    return false;
  c_parser_error (parser,
		  "no type or storage class may be specified here,");
  c_parser_skip_to_end_of_block_or_statement (parser);
  return true;
}
/* Parse an Objective-C @property declaration.  The syntax is:

   objc-property-declaration:
     '@property' objc-property-attributes[opt] struct-declaration ;

   objc-property-attributes:
     '(' objc-property-attribute-list ')'

   objc-property-attribute-list:
     objc-property-attribute
     objc-property-attribute-list, objc-property-attribute

   objc-property-attribute:
     'getter' = identifier
     'setter' = identifier
     'readonly'
     'readwrite'
     'assign'
     'retain'
     'copy'
     'nonatomic'

   For example:
     @property NSString *name;
     @property (readonly) id object;
     @property (retain, nonatomic, getter=getTheName) id name;
     @property int a, b, c;

   PS: This function is identical to cp_parser_objc_at_property_declaration
   for C++.  Keep them in sync.  */
static void
c_parser_objc_at_property_declaration (c_parser *parser)
{
  /* The following variables hold the attributes of the properties as
     parsed.  They are 'false' or 'NULL_TREE' if the attribute was not
     seen.  When we see an attribute, we set them to 'true' (if they
     are boolean properties) or to the identifier (if they have an
     argument, ie, for getter and setter).  Note that here we only
     parse the list of attributes, check the syntax and accumulate the
     attributes that we find.  objc_add_property_declaration() will
     then process the information.  */
  bool property_assign = false;
  bool property_copy = false;
  tree property_getter_ident = NULL_TREE;
  bool property_nonatomic = false;
  bool property_readonly = false;
  bool property_readwrite = false;
  bool property_retain = false;
  tree property_setter_ident = NULL_TREE;

  /* 'properties' is the list of properties that we read.  Usually a
     single one, but maybe more (eg, in "@property int a, b, c;" there
     are three).  */
  tree properties;
  location_t loc;

  loc = c_parser_peek_token (parser)->location;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROPERTY));

  c_parser_consume_token (parser);  /* Eat '@property'.  */

  /* Parse the optional attribute list...  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* Eat the '(' */
      c_parser_consume_token (parser);

      /* Property attribute keywords are valid now.  This flag makes
	 the lexer treat 'getter', 'readonly' etc. as keywords inside
	 the parenthesized list only.  */
      parser->objc_property_attr_context = true;

      /* Each iteration parses one attribute; a ',' continues the
	 list, anything else ends it.  */
      while (true)
	{
	  bool syntax_error = false;
	  c_token *token = c_parser_peek_token (parser);
	  enum rid keyword;

	  if (token->type != CPP_KEYWORD)
	    {
	      if (token->type == CPP_CLOSE_PAREN)
		c_parser_error (parser, "expected identifier");
	      else
		{
		  c_parser_consume_token (parser);
		  c_parser_error (parser, "unknown property attribute");
		}
	      break;
	    }
	  keyword = token->keyword;
	  c_parser_consume_token (parser);
	  switch (keyword)
	    {
	    case RID_ASSIGN:    property_assign = true;    break;
	    case RID_COPY:      property_copy = true;      break;
	    case RID_NONATOMIC: property_nonatomic = true; break;
	    case RID_READONLY:  property_readonly = true;  break;
	    case RID_READWRITE: property_readwrite = true; break;
	    case RID_RETAIN:    property_retain = true;    break;

	    case RID_GETTER:
	    case RID_SETTER:
	      /* Both take the form 'getter = name' / 'setter = name:',
		 and each may be given at most once.  */
	      if (c_parser_next_token_is_not (parser, CPP_EQ))
		{
		  if (keyword == RID_GETTER)
		    c_parser_error (parser,
				    "missing %<=%> (after %<getter%> attribute)");
		  else
		    c_parser_error (parser,
				    "missing %<=%> (after %<setter%> attribute)");
		  syntax_error = true;
		  break;
		}
	      c_parser_consume_token (parser); /* eat the = */
	      if (c_parser_next_token_is_not (parser, CPP_NAME))
		{
		  c_parser_error (parser, "expected identifier");
		  syntax_error = true;
		  break;
		}
	      if (keyword == RID_SETTER)
		{
		  if (property_setter_ident != NULL_TREE)
		    c_parser_error (parser, "the %<setter%> attribute may only be specified once");
		  else
		    property_setter_ident = c_parser_peek_token (parser)->value;
		  c_parser_consume_token (parser);
		  /* A setter selector takes one argument, so its name
		     must end in ':'.  */
		  if (c_parser_next_token_is_not (parser, CPP_COLON))
		    c_parser_error (parser, "setter name must terminate with %<:%>");
		  else
		    c_parser_consume_token (parser);
		}
	      else
		{
		  if (property_getter_ident != NULL_TREE)
		    c_parser_error (parser, "the %<getter%> attribute may only be specified once");
		  else
		    property_getter_ident = c_parser_peek_token (parser)->value;
		  c_parser_consume_token (parser);
		}
	      break;
	    default:
	      c_parser_error (parser, "unknown property attribute");
	      syntax_error = true;
	      break;
	    }

	  if (syntax_error)
	    break;

	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	}
      parser->objc_property_attr_context = false;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  /* ... and the property declaration(s).  */
  properties = c_parser_struct_declaration (parser);

  if (properties == error_mark_node)
    {
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      parser->error = false;
      return;
    }

  if (properties == NULL_TREE)
    c_parser_error (parser, "expected identifier");
  else
    {
      /* Comma-separated properties are chained together in
	 reverse order; add them one by one.  */
      properties = nreverse (properties);

      for (; properties; properties = TREE_CHAIN (properties))
	objc_add_property_declaration (loc, copy_node (properties),
				       property_readonly, property_readwrite,
				       property_assign, property_retain,
				       property_copy, property_nonatomic,
				       property_getter_ident, property_setter_ident);
    }

  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  parser->error = false;
}
/* Parse an Objective-C @synthesize declaration.  The syntax is:

   objc-synthesize-declaration:
     @synthesize objc-synthesize-identifier-list ;

   objc-synthesize-identifier-list:
     objc-synthesize-identifier
     objc-synthesize-identifier-list, objc-synthesize-identifier

   objc-synthesize-identifier:
     identifier
     identifier = identifier

   For example:
     @synthesize MyProperty;
     @synthesize OneProperty, AnotherProperty=MyIvar, YetAnotherProperty;

   PS: This function is identical to cp_parser_objc_at_synthesize_declaration
   for C++.  Keep them in sync.
*/
static void
c_parser_objc_at_synthesize_declaration (c_parser *parser)
{
  /* LIST collects one TREE_LIST node per synthesized property:
     TREE_VALUE holds the property name, TREE_PURPOSE the optional
     instance-variable name (NULL_TREE if none was given).  */
  tree list = NULL_TREE;
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNTHESIZE));
  loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);  /* Eat '@synthesize'.  */
  while (true)
    {
      tree property, ivar;
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	  /* Once we find the semicolon, we can resume normal parsing.
	     We have to reset parser->error manually because
	     c_parser_skip_until_found() won't reset it for us if the
	     next token is precisely a semicolon.  */
	  parser->error = false;
	  return;
	}
      property = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_EQ))
	{
	  /* 'property = ivar' form: parse the ivar name after '='.  */
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	      parser->error = false;
	      return;
	    }
	  ivar = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	}
      else
	ivar = NULL_TREE;
      list = chainon (list, build_tree_list (ivar, property));
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_add_synthesize_declaration (loc, list);
}
/* Parse an Objective-C @dynamic declaration.  The syntax is:

   objc-dynamic-declaration:
     @dynamic identifier-list ;

   For example:
     @dynamic MyProperty;
     @dynamic MyProperty, AnotherProperty;

   PS: This function is identical to cp_parser_objc_at_dynamic_declaration
   for C++.  Keep them in sync.
*/
static void
c_parser_objc_at_dynamic_declaration (c_parser *parser)
{
  /* LIST collects one TREE_LIST node per property name; only
     TREE_VALUE is used (there is no '=' ivar form for @dynamic).  */
  tree list = NULL_TREE;
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_DYNAMIC));
  loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);  /* Eat '@dynamic'.  */
  while (true)
    {
      tree property;
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	  /* Reset parser->error manually; c_parser_skip_until_found()
	     won't do it if the next token is exactly a semicolon.  */
	  parser->error = false;
	  return;
	}
      property = c_parser_peek_token (parser)->value;
      list = chainon (list, build_tree_list (NULL_TREE, property));
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_add_dynamic_declaration (loc, list);
}
/* Handle pragmas.  Some OpenMP pragmas are associated with, and therefore
   should be considered, statements.  CONTEXT describes where we are
   parsing: pragma_external (file scope), pragma_stmt (a single
   statement position), or pragma_compound (inside a compound
   statement, where standalone OpenMP directives are allowed).
   Returns true if we actually parsed such a statement-like pragma.

   NOTE(review): the original comment referred to a boolean ALLOW_STMT
   parameter that no longer exists; the pragma_context enum replaced it.  */
static bool
c_parser_pragma (c_parser *parser, enum pragma_context context)
{
  unsigned int id;

  id = c_parser_peek_token (parser)->pragma_kind;
  gcc_assert (id != PRAGMA_NONE);

  switch (id)
    {
    case PRAGMA_OMP_BARRIER:
      /* barrier/flush/taskwait are standalone directives, valid only
	 directly inside a compound statement.  */
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma omp barrier%> may only be "
			    "used in compound statements");
	  goto bad_stmt;
	}
      c_parser_omp_barrier (parser);
      return false;

    case PRAGMA_OMP_FLUSH:
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma omp flush%> may only be "
			    "used in compound statements");
	  goto bad_stmt;
	}
      c_parser_omp_flush (parser);
      return false;

    case PRAGMA_OMP_TASKWAIT:
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma omp taskwait%> may only be "
			    "used in compound statements");
	  goto bad_stmt;
	}
      c_parser_omp_taskwait (parser);
      return false;

    case PRAGMA_OMP_THREADPRIVATE:
      c_parser_omp_threadprivate (parser);
      return false;

    case PRAGMA_OMP_SECTION:
      /* 'omp section' is only meaningful inside an 'omp sections'
	 construct, which handles it itself; seeing it here is an error.  */
      error_at (c_parser_peek_token (parser)->location,
		"%<#pragma omp section%> may only be used in "
		"%<#pragma omp sections%> construct");
      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
      return false;

    case PRAGMA_GCC_PCH_PREPROCESS:
      c_parser_error (parser, "%<#pragma GCC pch_preprocess%> must be first");
      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
      return false;

    default:
      /* Ids below PRAGMA_FIRST_EXTERNAL are the remaining (statement-
	 generating) OpenMP constructs; everything else is dispatched
	 to a registered external pragma handler below.  */
      if (id < PRAGMA_FIRST_EXTERNAL)
	{
	  if (context == pragma_external)
	    {
	    bad_stmt:
	      c_parser_error (parser, "expected declaration specifiers");
	      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
	      return false;
	    }
	  c_parser_omp_construct (parser);
	  return true;
	}
      break;
    }

  c_parser_consume_pragma (parser);
  c_invoke_pragma_handler (id);

  /* Skip to EOL, but suppress any error message.  Those will have been
     generated by the handler routine through calling error, as opposed
     to calling c_parser_error.  */
  parser->error = true;
  c_parser_skip_to_pragma_eol (parser);

  return false;
}
/* The interface the pragma parsers have to the lexer.  Fetch the next
   token, store its tree value in *VALUE and return its type, mapping
   keywords to CPP_NAME and end-of-pragma/end-of-file to CPP_EOF.  The
   token is consumed unless it terminates the pragma.  */
enum cpp_ttype
pragma_lex (tree *value)
{
  c_token *next = c_parser_peek_token (the_parser);
  enum cpp_ttype kind = next->type;

  *value = next->value;
  if (kind == CPP_PRAGMA_EOL || kind == CPP_EOF)
    return CPP_EOF;

  /* A real token: eat it, and present keywords as plain names.  */
  c_parser_consume_token (the_parser);
  if (kind == CPP_KEYWORD)
    kind = CPP_NAME;

  return kind;
}
/* Parse '#pragma GCC pch_preprocess "filename"' and hand the named
   precompiled header off to c_common_pch_pragma.  On a malformed
   pragma an error is emitted and the pragma is skipped.  */
static void
c_parser_pragma_pch_preprocess (c_parser *parser)
{
  tree fname = NULL_TREE;

  c_parser_consume_pragma (parser);
  if (!c_parser_next_token_is (parser, CPP_STRING))
    c_parser_error (parser, "expected string literal");
  else
    {
      fname = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  c_parser_skip_to_pragma_eol (parser);

  if (fname != NULL_TREE)
    c_common_pch_pragma (parse_in, TREE_STRING_POINTER (fname));
}
/* OpenMP 2.5 parsing routines.  */

/* Returns name of the next clause.
   If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and
   the token is not consumed.  Otherwise appropriate pragma_omp_clause is
   returned and the token is consumed.  'if' and 'default' arrive as
   keywords (they are C keywords); all other clause names arrive as
   plain identifiers and are matched by spelling, switching on the
   first character to keep the strcmp chains short.  */
static pragma_omp_clause
c_parser_omp_clause_name (c_parser *parser)
{
  pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE;

  if (c_parser_next_token_is_keyword (parser, RID_IF))
    result = PRAGMA_OMP_CLAUSE_IF;
  else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    result = PRAGMA_OMP_CLAUSE_DEFAULT;
  else if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      switch (p[0])
	{
	case 'c':
	  if (!strcmp ("collapse", p))
	    result = PRAGMA_OMP_CLAUSE_COLLAPSE;
	  else if (!strcmp ("copyin", p))
	    result = PRAGMA_OMP_CLAUSE_COPYIN;
	  else if (!strcmp ("copyprivate", p))
	    result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
	  break;
	case 'f':
	  if (!strcmp ("firstprivate", p))
	    result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE;
	  break;
	case 'l':
	  if (!strcmp ("lastprivate", p))
	    result = PRAGMA_OMP_CLAUSE_LASTPRIVATE;
	  break;
	case 'n':
	  if (!strcmp ("nowait", p))
	    result = PRAGMA_OMP_CLAUSE_NOWAIT;
	  else if (!strcmp ("num_threads", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_THREADS;
	  break;
	case 'o':
	  if (!strcmp ("ordered", p))
	    result = PRAGMA_OMP_CLAUSE_ORDERED;
	  break;
	case 'p':
	  if (!strcmp ("private", p))
	    result = PRAGMA_OMP_CLAUSE_PRIVATE;
	  break;
	case 'r':
	  if (!strcmp ("reduction", p))
	    result = PRAGMA_OMP_CLAUSE_REDUCTION;
	  break;
	case 's':
	  if (!strcmp ("schedule", p))
	    result = PRAGMA_OMP_CLAUSE_SCHEDULE;
	  else if (!strcmp ("shared", p))
	    result = PRAGMA_OMP_CLAUSE_SHARED;
	  break;
	case 'u':
	  if (!strcmp ("untied", p))
	    result = PRAGMA_OMP_CLAUSE_UNTIED;
	  break;
	}
    }

  if (result != PRAGMA_OMP_CLAUSE_NONE)
    c_parser_consume_token (parser);

  return result;
}
/* Diagnose a duplicate clause: if CLAUSES already contains a clause of
   kind CODE, emit "too many NAME clauses" at that clause's location.
   At most one error is reported per call.  */
static void
check_no_duplicate_clause (tree clauses, enum omp_clause_code code,
			   const char *name)
{
  tree probe;

  for (probe = clauses; probe != NULL_TREE; probe = OMP_CLAUSE_CHAIN (probe))
    if (OMP_CLAUSE_CODE (probe) == code)
      {
	error_at (OMP_CLAUSE_LOCATION (probe), "too many %qs clauses", name);
	break;
      }
}
/* OpenMP 2.5:
   variable-list:
     identifier
     variable-list , identifier

   If KIND is nonzero, create the appropriate OMP clause node for each
   identifier, install the decl in OMP_CLAUSE_DECL and prepend the node
   to LIST; CLAUSE_LOC is then the location used for the new clauses.
   If KIND is zero, instead build a TREE_LIST with each decl in
   TREE_PURPOSE.  Return the list created.  */
static tree
c_parser_omp_variable_list (c_parser *parser,
			    location_t clause_loc,
			    enum omp_clause_code kind,
			    tree list)
{
  /* Report a missing identifier up front; the loop below simply does
     not run in that case and LIST is returned unchanged.  */
  if (c_parser_next_token_is_not (parser, CPP_NAME)
      || c_parser_peek_token (parser)->id_kind != C_ID_ID)
    c_parser_error (parser, "expected identifier");

  while (c_parser_next_token_is (parser, CPP_NAME)
	 && c_parser_peek_token (parser)->id_kind == C_ID_ID)
    {
      tree t = lookup_name (c_parser_peek_token (parser)->value);

      if (t == NULL_TREE)
	undeclared_variable (c_parser_peek_token (parser)->location,
			     c_parser_peek_token (parser)->value);
      else if (t == error_mark_node)
	;  /* Already diagnosed; skip silently.  */
      else if (kind != 0)
	{
	  tree u = build_omp_clause (clause_loc, kind);
	  OMP_CLAUSE_DECL (u) = t;
	  OMP_CLAUSE_CHAIN (u) = list;
	  list = u;
	}
      else
	list = tree_cons (t, NULL_TREE, list);

      c_parser_consume_token (parser);

      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;

      c_parser_consume_token (parser);
    }

  return list;
}
/* Like c_parser_omp_variable_list, but the list is wrapped in
   parentheses — the common shape for OMP clauses.  The clause location
   is taken from the token at the opening parenthesis.  */
static tree
c_parser_omp_var_list_parens (c_parser *parser, enum omp_clause_code kind,
			      tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;

  list = c_parser_omp_variable_list (parser, clause_loc, kind, list);
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return list;
}
/* OpenMP 3.0:
   collapse ( constant-expression )

   Parse a collapse clause and prepend it to LIST.  The argument must
   be a positive constant integer expression that also fits in an
   'int'; otherwise an error is emitted and LIST is returned
   unchanged.  */
static tree
c_parser_omp_clause_collapse (c_parser *parser, tree list)
{
  tree c, num = error_mark_node;
  HOST_WIDE_INT n;
  location_t loc;

  check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse");

  loc = c_parser_peek_token (parser)->location;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      num = c_parser_expr_no_commas (parser, NULL).value;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  if (num == error_mark_node)
    return list;
  /* Validate: integral type, host-representable, strictly positive,
     and round-trips through 'int' (the loop-collapse count is an int
     elsewhere in the compiler).  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (num))
      || !host_integerp (num, 0)
      || (n = tree_low_cst (num, 0)) <= 0
      || (int) n != n)
    {
      error_at (loc,
		"collapse argument needs positive constant integer expression");
      return list;
    }
  c = build_omp_clause (loc, OMP_CLAUSE_COLLAPSE);
  OMP_CLAUSE_COLLAPSE_EXPR (c) = num;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   copyin ( variable-list )

   Thin wrapper: parse the parenthesized variable list into COPYIN
   clause nodes prepended to LIST.  */
static tree
c_parser_omp_clause_copyin (c_parser *parser, tree list)
{
  list = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYIN, list);
  return list;
}
/* OpenMP 2.5:
   copyprivate ( variable-list )

   Thin wrapper: parse the parenthesized variable list into COPYPRIVATE
   clause nodes prepended to LIST.  */
static tree
c_parser_omp_clause_copyprivate (c_parser *parser, tree list)
{
  list = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYPRIVATE, list);
  return list;
}
/* OpenMP 2.5:
   default ( shared | none )

   Parse a default clause and prepend it to LIST.  On an unrecognized
   kind an error is emitted, the parenthesized group is skipped, and
   LIST is returned unchanged.  */
static tree
c_parser_omp_clause_default (c_parser *parser, tree list)
{
  enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
  location_t loc = c_parser_peek_token (parser)->location;
  tree c;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      switch (p[0])
	{
	case 'n':
	  if (strcmp ("none", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_NONE;
	  break;

	case 's':
	  if (strcmp ("shared", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_SHARED;
	  break;

	default:
	  goto invalid_kind;
	}

      c_parser_consume_token (parser);
    }
  else
    {
    invalid_kind:
      c_parser_error (parser, "expected %<none%> or %<shared%>");
    }
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

  /* KIND stays UNSPECIFIED exactly when the argument was invalid.  */
  if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default");
  c = build_omp_clause (loc, OMP_CLAUSE_DEFAULT);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_DEFAULT_KIND (c) = kind;
  return c;
}
/* OpenMP 2.5:
   firstprivate ( variable-list )

   Thin wrapper: parse the parenthesized variable list into
   FIRSTPRIVATE clause nodes prepended to LIST.  */
static tree
c_parser_omp_clause_firstprivate (c_parser *parser, tree list)
{
  list = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_FIRSTPRIVATE, list);
  return list;
}
/* OpenMP 2.5:
   if ( expression )

   Parse an if clause and prepend it to LIST; on a missing '(' report
   the error and return LIST unchanged.  */
static tree
c_parser_omp_clause_if (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree cond, c;

  if (!c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_error (parser, "expected %<(%>");
      return list;
    }

  cond = c_parser_paren_condition (parser);
  check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if");

  c = build_omp_clause (loc, OMP_CLAUSE_IF);
  OMP_CLAUSE_IF_EXPR (c) = cond;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   lastprivate ( variable-list )

   Thin wrapper: parse the parenthesized variable list into
   LASTPRIVATE clause nodes prepended to LIST.  */
static tree
c_parser_omp_clause_lastprivate (c_parser *parser, tree list)
{
  list = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_LASTPRIVATE, list);
  return list;
}
/* OpenMP 2.5:
   nowait

   Build a NOWAIT clause (it takes no arguments) and prepend it to
   LIST.  PARSER is used only to obtain the clause location.

   Fix: PARSER was annotated ATTRIBUTE_UNUSED, but the body does use it
   (for the location of the clause); the stale annotation is removed.  */
static tree
c_parser_omp_clause_nowait (c_parser *parser, tree list)
{
  tree c;
  location_t loc = c_parser_peek_token (parser)->location;

  check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait");

  c = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   num_threads ( expression )

   Parse a num_threads clause and prepend it to LIST.  The argument
   must have integer type; a value that can be statically shown to be
   non-positive is diagnosed with a warning and replaced by 1.  */
static tree
c_parser_omp_clause_num_threads (c_parser *parser, tree list)
{
  location_t num_threads_loc = c_parser_peek_token (parser)->location;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      tree c, t = c_parser_expression (parser).value;
      t = c_fully_fold (t, false, NULL);

      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}

      /* Attempt to statically determine when the number isn't positive.
	 Fold 't <= 0'; if that folds to boolean_true_node the argument
	 is a provably non-positive constant.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      if (CAN_HAVE_LOCATION_P (c))
	SET_EXPR_LOCATION (c, expr_loc);
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0,
		      "%<num_threads%> value must be positive");
	  t = integer_one_node;
	}

      check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads");

      c = build_omp_clause (num_threads_loc, OMP_CLAUSE_NUM_THREADS);
      OMP_CLAUSE_NUM_THREADS_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}
/* OpenMP 2.5:
   ordered

   Build an ORDERED clause (no arguments) at the current token's
   location and prepend it to LIST.  */
static tree
c_parser_omp_clause_ordered (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree clause;

  check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered");

  clause = build_omp_clause (loc, OMP_CLAUSE_ORDERED);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
   private ( variable-list )

   Thin wrapper: parse the parenthesized variable list into PRIVATE
   clause nodes prepended to LIST.  */
static tree
c_parser_omp_clause_private (c_parser *parser, tree list)
{
  list = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_PRIVATE, list);
  return list;
}
/* OpenMP 2.5:
   reduction ( reduction-operator : variable-list )

   reduction-operator:
     One of: + * - & ^ | && ||

   Parse a reduction clause.  One clause node is built per listed
   variable, and all of the newly built nodes get the parsed operator
   stored as their reduction code.  */
static tree
c_parser_omp_clause_reduction (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      enum tree_code code;

      /* Map the operator token to the corresponding tree code.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_PLUS:
	  code = PLUS_EXPR;
	  break;
	case CPP_MULT:
	  code = MULT_EXPR;
	  break;
	case CPP_MINUS:
	  code = MINUS_EXPR;
	  break;
	case CPP_AND:
	  code = BIT_AND_EXPR;
	  break;
	case CPP_XOR:
	  code = BIT_XOR_EXPR;
	  break;
	case CPP_OR:
	  code = BIT_IOR_EXPR;
	  break;
	case CPP_AND_AND:
	  code = TRUTH_ANDIF_EXPR;
	  break;
	case CPP_OR_OR:
	  code = TRUTH_ORIF_EXPR;
	  break;
	default:
	  c_parser_error (parser,
			  "expected %<+%>, %<*%>, %<-%>, %<&%>, "
			  "%<^%>, %<|%>, %<&&%>, or %<||%>");
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
	  return list;
	}
      c_parser_consume_token (parser);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	{
	  tree nl, c;

	  nl = c_parser_omp_variable_list (parser, clause_loc,
					   OMP_CLAUSE_REDUCTION, list);
	  /* The new clauses are the prefix of NL up to the old LIST
	     head; stamp the reduction code on exactly those.  */
	  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
	    OMP_CLAUSE_REDUCTION_CODE (c) = code;

	  list = nl;
	}
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  return list;
}
/* OpenMP 2.5:
   schedule ( schedule-kind )
   schedule ( schedule-kind , expression )

   schedule-kind:
     static | dynamic | guided | runtime | auto

   Parse a schedule clause and prepend it to LIST.  'static' and 'auto'
   arrive as keywords; the other kinds arrive as plain identifiers.  An
   optional chunk-size expression follows a comma, but is rejected for
   the 'runtime' and 'auto' kinds.  On an invalid kind, skip to the
   closing parenthesis and return LIST unchanged.  */
static tree
c_parser_omp_clause_schedule (c_parser *parser, tree list)
{
  tree c, t;
  location_t loc = c_parser_peek_token (parser)->location;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;

  c = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE);

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      tree kind = c_parser_peek_token (parser)->value;
      const char *p = IDENTIFIER_POINTER (kind);

      switch (p[0])
	{
	case 'd':
	  if (strcmp ("dynamic", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
	  break;

	case 'g':
	  if (strcmp ("guided", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
	  break;

	case 'r':
	  if (strcmp ("runtime", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
	  break;

	default:
	  goto invalid_kind;
	}
    }
  else if (c_parser_next_token_is_keyword (parser, RID_STATIC))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
  else if (c_parser_next_token_is_keyword (parser, RID_AUTO))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
  else
    goto invalid_kind;

  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COMMA))
    {
      location_t here;
      c_parser_consume_token (parser);

      here = c_parser_peek_token (parser)->location;
      t = c_parser_expr_no_commas (parser, NULL).value;
      t = c_fully_fold (t, false, NULL);

      if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
	error_at (here, "schedule %<runtime%> does not take "
		  "a %<chunk_size%> parameter");
      else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO)
	error_at (here,
		  "schedule %<auto%> does not take "
		  "a %<chunk_size%> parameter");
      else if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE)
	OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;
      else
	c_parser_error (parser, "expected integer expression");

      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  else
    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
			       "expected %<,%> or %<)%>");

  check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule");
  OMP_CLAUSE_CHAIN (c) = list;
  return c;

 invalid_kind:
  c_parser_error (parser, "invalid schedule kind");
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
  return list;
}
/* OpenMP 2.5:
   shared ( variable-list )

   Thin wrapper: parse the parenthesized variable list into SHARED
   clause nodes prepended to LIST.  */
static tree
c_parser_omp_clause_shared (c_parser *parser, tree list)
{
  list = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_SHARED, list);
  return list;
}
/* OpenMP 3.0:
   untied

   Build an UNTIED clause (it takes no arguments) and prepend it to
   LIST.  PARSER is used only to obtain the clause location.

   Fix: PARSER was annotated ATTRIBUTE_UNUSED, but the body does use it
   (for the location of the clause); the stale annotation is removed.  */
static tree
c_parser_omp_clause_untied (c_parser *parser, tree list)
{
  tree c;

  /* FIXME: Should we allow duplicates?  */
  check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied");

  c = build_omp_clause (c_parser_peek_token (parser)->location,
			OMP_CLAUSE_UNTIED);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* Parse all OpenMP clauses.  The set of clauses allowed by the
   directive is a bitmask in MASK (indexed by pragma_omp_clause).
   WHERE names the directive for diagnostics.  Return the finalized
   list of clauses found; a clause that is valid OpenMP but not
   permitted for this directive is parsed, diagnosed, and dropped.

   NOTE(review): the original comment mentioned a '*pdefault' out
   parameter which does not exist in this signature.  */
static tree
c_parser_omp_all_clauses (c_parser *parser, unsigned int mask,
			  const char *where)
{
  tree clauses = NULL;
  bool first = true;

  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      location_t here;
      pragma_omp_clause c_kind;
      const char *c_name;
      /* Remember the list head before this clause so a disallowed
	 clause can be unlinked again below.  */
      tree prev = clauses;

      if (!first && c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);

      first = false;

      here = c_parser_peek_token (parser)->location;
      c_kind = c_parser_omp_clause_name (parser);

      switch (c_kind)
	{
	case PRAGMA_OMP_CLAUSE_COLLAPSE:
	  clauses = c_parser_omp_clause_collapse (parser, clauses);
	  c_name = "collapse";
	  break;
	case PRAGMA_OMP_CLAUSE_COPYIN:
	  clauses = c_parser_omp_clause_copyin (parser, clauses);
	  c_name = "copyin";
	  break;
	case PRAGMA_OMP_CLAUSE_COPYPRIVATE:
	  clauses = c_parser_omp_clause_copyprivate (parser, clauses);
	  c_name = "copyprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_DEFAULT:
	  clauses = c_parser_omp_clause_default (parser, clauses);
	  c_name = "default";
	  break;
	case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
	  clauses = c_parser_omp_clause_firstprivate (parser, clauses);
	  c_name = "firstprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_IF:
	  clauses = c_parser_omp_clause_if (parser, clauses);
	  c_name = "if";
	  break;
	case PRAGMA_OMP_CLAUSE_LASTPRIVATE:
	  clauses = c_parser_omp_clause_lastprivate (parser, clauses);
	  c_name = "lastprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_NOWAIT:
	  clauses = c_parser_omp_clause_nowait (parser, clauses);
	  c_name = "nowait";
	  break;
	case PRAGMA_OMP_CLAUSE_NUM_THREADS:
	  clauses = c_parser_omp_clause_num_threads (parser, clauses);
	  c_name = "num_threads";
	  break;
	case PRAGMA_OMP_CLAUSE_ORDERED:
	  clauses = c_parser_omp_clause_ordered (parser, clauses);
	  c_name = "ordered";
	  break;
	case PRAGMA_OMP_CLAUSE_PRIVATE:
	  clauses = c_parser_omp_clause_private (parser, clauses);
	  c_name = "private";
	  break;
	case PRAGMA_OMP_CLAUSE_REDUCTION:
	  clauses = c_parser_omp_clause_reduction (parser, clauses);
	  c_name = "reduction";
	  break;
	case PRAGMA_OMP_CLAUSE_SCHEDULE:
	  clauses = c_parser_omp_clause_schedule (parser, clauses);
	  c_name = "schedule";
	  break;
	case PRAGMA_OMP_CLAUSE_SHARED:
	  clauses = c_parser_omp_clause_shared (parser, clauses);
	  c_name = "shared";
	  break;
	case PRAGMA_OMP_CLAUSE_UNTIED:
	  clauses = c_parser_omp_clause_untied (parser, clauses);
	  c_name = "untied";
	  break;
	default:
	  c_parser_error (parser, "expected %<#pragma omp%> clause");
	  goto saw_error;
	}

      if (((mask >> c_kind) & 1) == 0 && !parser->error)
	{
	  /* Remove the invalid clause(s) from the list to avoid
	     confusing the rest of the compiler.  */
	  clauses = prev;
	  error_at (here, "%qs is not valid for %qs", c_name, where);
	}
    }

 saw_error:
  c_parser_skip_to_pragma_eol (parser);

  return c_finish_omp_clauses (clauses);
}
/* OpenMP 2.5:
   structured-block:
     statement

   In practice, we're also interested in adding the statement to an
   outer node.  So it is convenient if we work around the fact that
   c_parser_statement calls add_stmt: the statement is captured into a
   fresh statement list instead of the current one, and that list is
   returned.  */
static tree
c_parser_omp_structured_block (c_parser *parser)
{
  tree stmt = push_stmt_list ();
  c_parser_statement (parser);
  return pop_stmt_list (stmt);
}
/* OpenMP 2.5:
   # pragma omp atomic new-line
     expression-stmt

   expression-stmt:
     x binop= expr | x++ | ++x | x-- | --x
   binop:
     +, *, -, /, &, ^, |, <<, >>

  where x is an lvalue expression with scalar type.

  LOC is the location of the #pragma token.

  The parsed statement is decomposed into an lvalue LHS, an operation
  CODE and an rhs RHS, which c_finish_omp_atomic recombines into the
  atomic update.  Increment/decrement forms are normalized to
  'x += 1' / 'x -= 1'; _Bool increments (which the front end expands
  into larger trees) are unwrapped back to a plain store first.  */
static void
c_parser_omp_atomic (location_t loc, c_parser *parser)
{
  tree lhs, rhs;
  tree stmt;
  enum tree_code code;
  struct c_expr rhs_expr;

  c_parser_skip_to_pragma_eol (parser);

  lhs = c_parser_unary_expression (parser).value;
  lhs = c_fully_fold (lhs, false, NULL);
  switch (TREE_CODE (lhs))
    {
    case ERROR_MARK:
    saw_error:
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;

    case PREINCREMENT_EXPR:
    case POSTINCREMENT_EXPR:
      /* x++ / ++x  ==>  x += 1.  */
      lhs = TREE_OPERAND (lhs, 0);
      code = PLUS_EXPR;
      rhs = integer_one_node;
      break;

    case PREDECREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      /* x-- / --x  ==>  x -= 1.  */
      lhs = TREE_OPERAND (lhs, 0);
      code = MINUS_EXPR;
      rhs = integer_one_node;
      break;

    case COMPOUND_EXPR:
      if (TREE_CODE (TREE_OPERAND (lhs, 0)) == SAVE_EXPR
	  && TREE_CODE (TREE_OPERAND (lhs, 1)) == COMPOUND_EXPR
	  && TREE_CODE (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) == MODIFY_EXPR
	  && TREE_OPERAND (TREE_OPERAND (lhs, 1), 1) == TREE_OPERAND (lhs, 0)
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND
					      (TREE_OPERAND (lhs, 1), 0), 0)))
	     == BOOLEAN_TYPE)
	/* Undo effects of boolean_increment for post {in,de}crement.  */
	lhs = TREE_OPERAND (TREE_OPERAND (lhs, 1), 0);
      /* FALLTHRU */
    case MODIFY_EXPR:
      if (TREE_CODE (lhs) == MODIFY_EXPR
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) == BOOLEAN_TYPE)
	{
	  /* Undo effects of boolean_increment.  */
	  if (integer_onep (TREE_OPERAND (lhs, 1)))
	    {
	      /* This is pre or post increment.  */
	      rhs = TREE_OPERAND (lhs, 1);
	      lhs = TREE_OPERAND (lhs, 0);
	      code = NOP_EXPR;
	      break;
	    }
	  if (TREE_CODE (TREE_OPERAND (lhs, 1)) == TRUTH_NOT_EXPR
	      && TREE_OPERAND (lhs, 0)
		 == TREE_OPERAND (TREE_OPERAND (lhs, 1), 0))
	    {
	      /* This is pre or post decrement.  */
	      rhs = TREE_OPERAND (lhs, 1);
	      lhs = TREE_OPERAND (lhs, 0);
	      code = NOP_EXPR;
	      break;
	    }
	}
      /* FALLTHRU */
    default:
      /* General case: 'x binop= expr'.  Map the compound-assignment
	 token to its tree code, then parse the rhs.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_MULT_EQ:
	  code = MULT_EXPR;
	  break;
	case CPP_DIV_EQ:
	  code = TRUNC_DIV_EXPR;
	  break;
	case CPP_PLUS_EQ:
	  code = PLUS_EXPR;
	  break;
	case CPP_MINUS_EQ:
	  code = MINUS_EXPR;
	  break;
	case CPP_LSHIFT_EQ:
	  code = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT_EQ:
	  code = RSHIFT_EXPR;
	  break;
	case CPP_AND_EQ:
	  code = BIT_AND_EXPR;
	  break;
	case CPP_OR_EQ:
	  code = BIT_IOR_EXPR;
	  break;
	case CPP_XOR_EQ:
	  code = BIT_XOR_EXPR;
	  break;
	default:
	  c_parser_error (parser,
			  "invalid operator for %<#pragma omp atomic%>");
	  goto saw_error;
	}

      /* Arrange to pass the location of the assignment operator to
	 c_finish_omp_atomic.  */
      loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      {
	location_t rhs_loc = c_parser_peek_token (parser)->location;
	rhs_expr = c_parser_expression (parser);
	rhs_expr = default_function_array_read_conversion (rhs_loc, rhs_expr);
      }
      rhs = rhs_expr.value;
      rhs = c_fully_fold (rhs, false, NULL);
      break;
    }
  stmt = c_finish_omp_atomic (loc, code, lhs, rhs);
  if (stmt != error_mark_node)
    add_stmt (stmt);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* OpenMP 2.5:
   # pragma omp barrier new-line

   The directive takes nothing but the newline; consume the pragma and
   emit the barrier at the pragma's location.  */
static void
c_parser_omp_barrier (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_barrier (loc);
}
/* OpenMP 2.5:
   # pragma omp critical [(name)] new-line
     structured-block

  LOC is the location of the #pragma itself.  The optional NAME
  identifies which critical region this is (unnamed if absent); the
  following statement is captured as the region's body.  */
static tree
c_parser_omp_critical (location_t loc, c_parser *parser)
{
  tree stmt, name = NULL;

  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  name = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>");
	}
      else
	c_parser_error (parser, "expected identifier");
    }
  else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    c_parser_error (parser, "expected %<(%> or end of line");
  c_parser_skip_to_pragma_eol (parser);

  stmt = c_parser_omp_structured_block (parser);
  return c_finish_omp_critical (loc, stmt, name);
}
/* OpenMP 2.5:
   # pragma omp flush flush-vars[opt] new-line

   flush-vars:
     ( variable-list )

   The optional variable list is parsed (for its diagnostics) but
   discarded: the OMP_CLAUSE_ERROR kind and NULL list mean nothing is
   accumulated, and c_finish_omp_flush takes only the location.  */
static void
c_parser_omp_flush (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL);
  else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    c_parser_error (parser, "expected %<(%> or end of line");
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_flush (loc);
}
/* Parse the restricted form of the for statement allowed by OpenMP.
The real trick here is to determine the loop control variable early
so that we can push a new decl if necessary to make it private.
LOC is the location of the OMP in "#pragma omp". */
static tree
c_parser_omp_for_loop (location_t loc,
c_parser *parser, tree clauses, tree *par_clauses)
{
tree decl, cond, incr, save_break, save_cont, body, init, stmt, cl;
tree declv, condv, incrv, initv, ret = NULL;
bool fail = false, open_brace_parsed = false;
int i, collapse = 1, nbraces = 0;
location_t for_loc;
VEC(tree,gc) *for_block = make_tree_vector ();
/* A collapse(N) clause means N perfectly nested for loops form the
   iteration space; the default is a single loop.  */
for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl))
if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE)
collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0);
gcc_assert (collapse >= 1);
/* One slot per collapsed loop for its decl, init, cond and incr.  */
declv = make_tree_vec (collapse);
initv = make_tree_vec (collapse);
condv = make_tree_vec (collapse);
incrv = make_tree_vec (collapse);
if (!c_parser_next_token_is_keyword (parser, RID_FOR))
{
c_parser_error (parser, "for statement expected");
return NULL;
}
for_loc = c_parser_peek_token (parser)->location;
c_parser_consume_token (parser);
/* Parse the header of each collapsed loop.  */
for (i = 0; i < collapse; i++)
{
int bracecount = 0;
if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
goto pop_scopes;
/* Parse the initialization declaration or expression. */
if (c_parser_next_tokens_start_declaration (parser))
{
/* Declarations in inner collapsed loops get their own scope,
   remembered in FOR_BLOCK and closed at pop_scopes.  */
if (i > 0)
VEC_safe_push (tree, gc, for_block, c_begin_compound_stmt (true));
c_parser_declaration_or_fndef (parser, true, true, true, true, true, NULL);
decl = check_for_loop_decls (for_loc, flag_isoc99);
if (decl == NULL)
goto error_init;
if (DECL_INITIAL (decl) == error_mark_node)
decl = error_mark_node;
init = decl;
}
else if (c_parser_next_token_is (parser, CPP_NAME)
&& c_parser_peek_2nd_token (parser)->type == CPP_EQ)
{
/* Plain assignment form: "iv = expr;".  */
struct c_expr decl_exp;
struct c_expr init_exp;
location_t init_loc;
decl_exp = c_parser_postfix_expression (parser);
decl = decl_exp.value;
c_parser_require (parser, CPP_EQ, "expected %<=%>");
init_loc = c_parser_peek_token (parser)->location;
init_exp = c_parser_expr_no_commas (parser, NULL);
init_exp = default_function_array_read_conversion (init_loc,
init_exp);
init = build_modify_expr (init_loc, decl, decl_exp.original_type,
NOP_EXPR, init_loc, init_exp.value,
init_exp.original_type);
init = c_process_expr_stmt (init_loc, init);
c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
else
{
error_init:
c_parser_error (parser,
"expected iteration declaration or initialization");
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
fail = true;
goto parse_next;
}
/* Parse the loop condition. */
cond = NULL_TREE;
if (c_parser_next_token_is_not (parser, CPP_SEMICOLON))
{
location_t cond_loc = c_parser_peek_token (parser)->location;
struct c_expr cond_expr = c_parser_binary_expression (parser, NULL);
cond = cond_expr.value;
cond = c_objc_common_truthvalue_conversion (cond_loc, cond);
cond = c_fully_fold (cond, false, NULL);
/* OpenMP only allows relational comparisons here; anything else
   becomes an error placeholder whose location survives until
   c_finish_omp_for can diagnose it.  */
switch (cond_expr.original_code)
{
case GT_EXPR:
case GE_EXPR:
case LT_EXPR:
case LE_EXPR:
break;
default:
/* Can't be cond = error_mark_node, because we want to preserve
the location until c_finish_omp_for. */
cond = build1 (NOP_EXPR, boolean_type_node, error_mark_node);
break;
}
protected_set_expr_location (cond, cond_loc);
}
c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
/* Parse the increment expression. */
incr = NULL_TREE;
if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN))
{
location_t incr_loc = c_parser_peek_token (parser)->location;
incr = c_process_expr_stmt (incr_loc,
c_parser_expression (parser).value);
}
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
if (decl == NULL || decl == error_mark_node || init == error_mark_node)
fail = true;
else
{
TREE_VEC_ELT (declv, i) = decl;
TREE_VEC_ELT (initv, i) = init;
TREE_VEC_ELT (condv, i) = cond;
TREE_VEC_ELT (incrv, i) = incr;
}
parse_next:
if (i == collapse - 1)
break;
/* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is allowed
in between the collapsed for loops to be still considered perfectly
nested. Hopefully the final version clarifies this.
For now handle (multiple) {'s and empty statements. */
do
{
if (c_parser_next_token_is_keyword (parser, RID_FOR))
{
c_parser_consume_token (parser);
break;
}
else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
{
c_parser_consume_token (parser);
bracecount++;
}
else if (bracecount
&& c_parser_next_token_is (parser, CPP_SEMICOLON))
c_parser_consume_token (parser);
else
{
c_parser_error (parser, "not enough perfectly nested loops");
if (bracecount)
{
open_brace_parsed = true;
bracecount--;
}
fail = true;
collapse = 0;
break;
}
}
while (1);
nbraces += bracecount;
}
/* Save and override the break/continue labels while parsing the
   innermost body; size_one_node presumably marks "break" as invalid
   inside an OpenMP for -- TODO confirm against c_finish_bc_stmt.  */
save_break = c_break_label;
c_break_label = size_one_node;
save_cont = c_cont_label;
c_cont_label = NULL_TREE;
body = push_stmt_list ();
if (open_brace_parsed)
{
location_t here = c_parser_peek_token (parser)->location;
stmt = c_begin_compound_stmt (true);
c_parser_compound_statement_nostart (parser);
add_stmt (c_end_compound_stmt (here, stmt, true));
}
else
add_stmt (c_parser_c99_block_statement (parser));
/* If the body used continue, emit the label it branched to.  */
if (c_cont_label)
{
tree t = build1 (LABEL_EXPR, void_type_node, c_cont_label);
SET_EXPR_LOCATION (t, loc);
add_stmt (t);
}
body = pop_stmt_list (body);
c_break_label = save_break;
c_cont_label = save_cont;
/* Consume the closing braces opened between the collapsed loops.  */
while (nbraces)
{
if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
{
c_parser_consume_token (parser);
nbraces--;
}
else if (c_parser_next_token_is (parser, CPP_SEMICOLON))
c_parser_consume_token (parser);
else
{
c_parser_error (parser, "collapsed loops not perfectly nested");
while (nbraces)
{
location_t here = c_parser_peek_token (parser)->location;
stmt = c_begin_compound_stmt (true);
add_stmt (body);
c_parser_compound_statement_nostart (parser);
body = c_end_compound_stmt (here, stmt, true);
nbraces--;
}
goto pop_scopes;
}
}
/* Only bother calling c_finish_omp_for if we haven't already generated
an error from the initialization parsing. */
if (!fail)
{
stmt = c_finish_omp_for (loc, declv, initv, condv, incrv, body, NULL);
if (stmt)
{
/* For a combined parallel-for (PAR_CLAUSES non-NULL), fix up the
   iteration variables' data-sharing clauses on the enclosing
   parallel region.  */
if (par_clauses != NULL)
{
tree *c;
for (c = par_clauses; *c ; )
if (OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_FIRSTPRIVATE
&& OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_LASTPRIVATE)
c = &OMP_CLAUSE_CHAIN (*c);
else
{
for (i = 0; i < collapse; i++)
if (TREE_VEC_ELT (declv, i) == OMP_CLAUSE_DECL (*c))
break;
if (i == collapse)
c = &OMP_CLAUSE_CHAIN (*c);
else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE)
{
error_at (loc,
"iteration variable %qD should not be firstprivate",
OMP_CLAUSE_DECL (*c));
*c = OMP_CLAUSE_CHAIN (*c);
}
else
{
/* Copy lastprivate (decl) clause to OMP_FOR_CLAUSES,
change it to shared (decl) in
OMP_PARALLEL_CLAUSES. */
tree l = build_omp_clause (OMP_CLAUSE_LOCATION (*c),
OMP_CLAUSE_LASTPRIVATE);
OMP_CLAUSE_DECL (l) = OMP_CLAUSE_DECL (*c);
OMP_CLAUSE_CHAIN (l) = clauses;
clauses = l;
OMP_CLAUSE_SET_CODE (*c, OMP_CLAUSE_SHARED);
}
}
}
OMP_FOR_CLAUSES (stmt) = clauses;
}
ret = stmt;
}
pop_scopes:
/* Close any scopes opened for inner collapsed-loop declarations.  */
while (!VEC_empty (tree, for_block))
{
/* FIXME diagnostics: LOC below should be the actual location of
this particular for block. We need to build a list of
locations to go along with FOR_BLOCK. */
stmt = c_end_compound_stmt (loc, VEC_pop (tree, for_block), true);
add_stmt (stmt);
}
release_tree_vector (for_block);
return ret;
}
/* OpenMP 2.5:
#pragma omp for for-clause[optseq] new-line
for-loop
LOC is the location of the #pragma token.
*/
#define OMP_FOR_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (1u << PRAGMA_OMP_CLAUSE_ORDERED) \
| (1u << PRAGMA_OMP_CLAUSE_SCHEDULE) \
| (1u << PRAGMA_OMP_CLAUSE_COLLAPSE) \
| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
c_parser_omp_for (location_t loc, c_parser *parser)
{
  tree for_clauses, scope_block, ret;

  /* Parse the clauses that may follow "#pragma omp for".  */
  for_clauses = c_parser_omp_all_clauses (parser, OMP_FOR_CLAUSE_MASK,
                                          "#pragma omp for");

  /* Wrap the loop in its own compound statement so declarations made
     while parsing it are properly scoped.  */
  scope_block = c_begin_compound_stmt (true);
  ret = c_parser_omp_for_loop (loc, parser, for_clauses, NULL);
  scope_block = c_end_compound_stmt (loc, scope_block, true);
  add_stmt (scope_block);

  return ret;
}
/* OpenMP 2.5:
# pragma omp master new-line
structured-block
LOC is the location of the #pragma token.
*/
static tree
c_parser_omp_master (location_t loc, c_parser *parser)
{
  tree body;

  /* Nothing else is allowed on the pragma line.  */
  c_parser_skip_to_pragma_eol (parser);
  body = c_parser_omp_structured_block (parser);
  return c_finish_omp_master (loc, body);
}
/* OpenMP 2.5:
# pragma omp ordered new-line
structured-block
LOC is the location of the #pragma itself.
*/
static tree
c_parser_omp_ordered (location_t loc, c_parser *parser)
{
  tree body;

  /* Nothing else is allowed on the pragma line.  */
  c_parser_skip_to_pragma_eol (parser);
  body = c_parser_omp_structured_block (parser);
  return c_finish_omp_ordered (loc, body);
}
/* OpenMP 2.5:
section-scope:
{ section-sequence }
section-sequence:
section-directive[opt] structured-block
section-sequence section-directive structured-block
SECTIONS_LOC is the location of the #pragma omp sections. */
static tree
c_parser_omp_sections_scope (location_t sections_loc, c_parser *parser)
{
tree stmt, substmt;
bool error_suppress = false;
location_t loc;
loc = c_parser_peek_token (parser)->location;
if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
{
/* Avoid skipping until the end of the block.  */
parser->error = false;
return NULL_TREE;
}
stmt = push_stmt_list ();
/* The first section may omit its "#pragma omp section"; collect the
   statements up to the first explicit section directive (or the end
   of the block) into an implicit OMP_SECTION.  */
if (c_parser_peek_token (parser)->pragma_kind != PRAGMA_OMP_SECTION)
{
substmt = push_stmt_list ();
while (1)
{
c_parser_statement (parser);
if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION)
break;
if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
break;
if (c_parser_next_token_is (parser, CPP_EOF))
break;
}
substmt = pop_stmt_list (substmt);
substmt = build1 (OMP_SECTION, void_type_node, substmt);
SET_EXPR_LOCATION (substmt, loc);
add_stmt (substmt);
}
/* Every remaining section should start with "#pragma omp section".
   ERROR_SUPPRESS keeps us from repeating the complaint for each
   statement after the first missing directive.  */
while (1)
{
if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
break;
if (c_parser_next_token_is (parser, CPP_EOF))
break;
loc = c_parser_peek_token (parser)->location;
if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION)
{
c_parser_consume_pragma (parser);
c_parser_skip_to_pragma_eol (parser);
error_suppress = false;
}
else if (!error_suppress)
{
error_at (loc, "expected %<#pragma omp section%> or %<}%>");
error_suppress = true;
}
substmt = c_parser_omp_structured_block (parser);
substmt = build1 (OMP_SECTION, void_type_node, substmt);
SET_EXPR_LOCATION (substmt, loc);
add_stmt (substmt);
}
c_parser_skip_until_found (parser, CPP_CLOSE_BRACE,
"expected %<#pragma omp section%> or %<}%>");
/* Wrap all the collected OMP_SECTION nodes in one OMP_SECTIONS.  */
substmt = pop_stmt_list (stmt);
stmt = make_node (OMP_SECTIONS);
SET_EXPR_LOCATION (stmt, sections_loc);
TREE_TYPE (stmt) = void_type_node;
OMP_SECTIONS_BODY (stmt) = substmt;
return add_stmt (stmt);
}
/* OpenMP 2.5:
# pragma omp sections sections-clause[optseq] newline
sections-scope
LOC is the location of the #pragma token.
*/
#define OMP_SECTIONS_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
c_parser_omp_sections (location_t loc, c_parser *parser)
{
  tree sec_clauses, scope_block, sections_stmt;

  /* Parse the clauses that may follow "#pragma omp sections".  */
  sec_clauses = c_parser_omp_all_clauses (parser, OMP_SECTIONS_CLAUSE_MASK,
                                          "#pragma omp sections");

  /* Parse the section scope inside its own compound statement.  */
  scope_block = c_begin_compound_stmt (true);
  sections_stmt = c_parser_omp_sections_scope (loc, parser);
  if (sections_stmt)
    OMP_SECTIONS_CLAUSES (sections_stmt) = sec_clauses;
  scope_block = c_end_compound_stmt (loc, scope_block, true);
  add_stmt (scope_block);

  return sections_stmt;
}
/* OpenMP 2.5:
# pragma parallel parallel-clause new-line
# pragma parallel for parallel-for-clause new-line
# pragma parallel sections parallel-sections-clause new-line
LOC is the location of the #pragma token.
*/
#define OMP_PARALLEL_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_IF) \
| (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \
| (1u << PRAGMA_OMP_CLAUSE_SHARED) \
| (1u << PRAGMA_OMP_CLAUSE_COPYIN) \
| (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (1u << PRAGMA_OMP_CLAUSE_NUM_THREADS))
static tree
c_parser_omp_parallel (location_t loc, c_parser *parser)
{
enum pragma_kind p_kind = PRAGMA_OMP_PARALLEL;
const char *p_name = "#pragma omp parallel";
tree stmt, clauses, par_clause, ws_clause, block;
unsigned int mask = OMP_PARALLEL_CLAUSE_MASK;
/* Detect the combined forms "parallel for" and "parallel sections":
   they accept the union of both clause sets except "nowait".  */
if (c_parser_next_token_is_keyword (parser, RID_FOR))
{
c_parser_consume_token (parser);
p_kind = PRAGMA_OMP_PARALLEL_FOR;
p_name = "#pragma omp parallel for";
mask |= OMP_FOR_CLAUSE_MASK;
mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
}
else if (c_parser_next_token_is (parser, CPP_NAME))
{
const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
if (strcmp (p, "sections") == 0)
{
c_parser_consume_token (parser);
p_kind = PRAGMA_OMP_PARALLEL_SECTIONS;
p_name = "#pragma omp parallel sections";
mask |= OMP_SECTIONS_CLAUSE_MASK;
mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
}
}
clauses = c_parser_omp_all_clauses (parser, mask, p_name);
switch (p_kind)
{
case PRAGMA_OMP_PARALLEL:
block = c_begin_omp_parallel ();
c_parser_statement (parser);
stmt = c_finish_omp_parallel (loc, clauses, block);
break;
case PRAGMA_OMP_PARALLEL_FOR:
block = c_begin_omp_parallel ();
/* Split the clauses between the parallel region and the inner
   worksharing loop; the loop parser may further adjust the
   parallel clauses (see c_parser_omp_for_loop).  */
c_split_parallel_clauses (loc, clauses, &par_clause, &ws_clause);
c_parser_omp_for_loop (loc, parser, ws_clause, &par_clause);
stmt = c_finish_omp_parallel (loc, par_clause, block);
OMP_PARALLEL_COMBINED (stmt) = 1;
break;
case PRAGMA_OMP_PARALLEL_SECTIONS:
block = c_begin_omp_parallel ();
c_split_parallel_clauses (loc, clauses, &par_clause, &ws_clause);
stmt = c_parser_omp_sections_scope (loc, parser);
if (stmt)
OMP_SECTIONS_CLAUSES (stmt) = ws_clause;
stmt = c_finish_omp_parallel (loc, par_clause, block);
OMP_PARALLEL_COMBINED (stmt) = 1;
break;
default:
gcc_unreachable ();
}
return stmt;
}
/* OpenMP 2.5:
# pragma omp single single-clause[optseq] new-line
structured-block
LOC is the location of the #pragma.
*/
#define OMP_SINGLE_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
c_parser_omp_single (location_t loc, c_parser *parser)
{
  tree single_stmt;

  /* Build the OMP_SINGLE node up front, then fill in its clauses and
     body as they are parsed.  */
  single_stmt = make_node (OMP_SINGLE);
  SET_EXPR_LOCATION (single_stmt, loc);
  TREE_TYPE (single_stmt) = void_type_node;

  OMP_SINGLE_CLAUSES (single_stmt)
    = c_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK,
                                "#pragma omp single");
  OMP_SINGLE_BODY (single_stmt) = c_parser_omp_structured_block (parser);

  return add_stmt (single_stmt);
}
/* OpenMP 3.0:
# pragma omp task task-clause[optseq] new-line
LOC is the location of the #pragma.
*/
#define OMP_TASK_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_IF) \
| (1u << PRAGMA_OMP_CLAUSE_UNTIED) \
| (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \
| (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_SHARED))
static tree
c_parser_omp_task (location_t loc, c_parser *parser)
{
  tree task_clauses, task_block;

  /* Parse the clauses following "#pragma omp task".  */
  task_clauses = c_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK,
                                           "#pragma omp task");

  /* The task body is the single statement that follows.  */
  task_block = c_begin_omp_task ();
  c_parser_statement (parser);
  return c_finish_omp_task (loc, task_clauses, task_block);
}
/* OpenMP 3.0:
# pragma omp taskwait new-line
*/
static void
c_parser_omp_taskwait (c_parser *parser)
{
  /* Remember the pragma's own location for the built statement.  */
  location_t pragma_loc = c_parser_peek_token (parser)->location;

  c_parser_consume_pragma (parser);
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_taskwait (pragma_loc);
}
/* Main entry point to parsing most OpenMP pragmas. */
/* Dispatch on the pragma kind of the pending token and parse the
   corresponding OpenMP construct.  */
static void
c_parser_omp_construct (c_parser *parser)
{
  location_t pragma_loc;
  enum pragma_kind kind;
  tree ret;

  pragma_loc = c_parser_peek_token (parser)->location;
  kind = c_parser_peek_token (parser)->pragma_kind;
  c_parser_consume_pragma (parser);

  switch (kind)
    {
    case PRAGMA_OMP_ATOMIC:
      /* Atomic yields no statement tree to check below.  */
      c_parser_omp_atomic (pragma_loc, parser);
      return;
    case PRAGMA_OMP_CRITICAL:
      ret = c_parser_omp_critical (pragma_loc, parser);
      break;
    case PRAGMA_OMP_FOR:
      ret = c_parser_omp_for (pragma_loc, parser);
      break;
    case PRAGMA_OMP_MASTER:
      ret = c_parser_omp_master (pragma_loc, parser);
      break;
    case PRAGMA_OMP_ORDERED:
      ret = c_parser_omp_ordered (pragma_loc, parser);
      break;
    case PRAGMA_OMP_PARALLEL:
      ret = c_parser_omp_parallel (pragma_loc, parser);
      break;
    case PRAGMA_OMP_SECTIONS:
      ret = c_parser_omp_sections (pragma_loc, parser);
      break;
    case PRAGMA_OMP_SINGLE:
      ret = c_parser_omp_single (pragma_loc, parser);
      break;
    case PRAGMA_OMP_TASK:
      ret = c_parser_omp_task (pragma_loc, parser);
      break;
    default:
      gcc_unreachable ();
    }

  /* Every successfully built construct must carry a location.  */
  if (ret)
    gcc_assert (EXPR_LOCATION (ret) != UNKNOWN_LOCATION);
}
/* OpenMP 2.5:
# pragma omp threadprivate (variable-list) */
static void
c_parser_omp_threadprivate (c_parser *parser)
{
tree vars, t;
location_t loc;
c_parser_consume_pragma (parser);
loc = c_parser_peek_token (parser)->location;
vars = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL);
/* Mark every variable in VARS to be assigned thread local storage. */
for (t = vars; t; t = TREE_CHAIN (t))
{
tree v = TREE_PURPOSE (t);
/* FIXME diagnostics: Ideally we should keep individual
locations for all the variables in the var list to make the
following errors more precise. Perhaps
c_parser_omp_var_list_parens() should construct a list of
locations to go along with the var list. */
/* If V had already been marked threadprivate, it doesn't matter
whether it had been used prior to this point. */
if (TREE_CODE (v) != VAR_DECL)
error_at (loc, "%qD is not a variable", v);
else if (TREE_USED (v) && !C_DECL_THREADPRIVATE_P (v))
error_at (loc, "%qE declared %<threadprivate%> after first use", v);
else if (! TREE_STATIC (v) && ! DECL_EXTERNAL (v))
error_at (loc, "automatic variable %qE cannot be %<threadprivate%>", v);
else if (TREE_TYPE (v) == error_mark_node)
;
else if (! COMPLETE_TYPE_P (TREE_TYPE (v)))
error_at (loc, "%<threadprivate%> %qE has incomplete type", v);
else
{
/* All checks passed: give V thread-local storage, unless a
   previous threadprivate directive already did.  */
if (! DECL_THREAD_LOCAL_P (v))
{
DECL_TLS_MODEL (v) = decl_default_tls_model (v);
/* If rtl has been already set for this var, call
make_decl_rtl once again, so that encode_section_info
has a chance to look at the new decl flags. */
if (DECL_RTL_SET_P (v))
make_decl_rtl (v);
}
C_DECL_THREADPRIVATE_P (v) = 1;
}
}
c_parser_skip_to_pragma_eol (parser);
}
/* Parse a single source file. */
void
c_parse_file (void)
{
/* Use local storage to begin. If the first token is a pragma, parse it.
If it is #pragma GCC pch_preprocess, then this will load a PCH file
which will cause garbage collection. */
c_parser tparser;
memset (&tparser, 0, sizeof tparser);
the_parser = &tparser;
if (c_parser_peek_token (&tparser)->pragma_kind == PRAGMA_GCC_PCH_PREPROCESS)
c_parser_pragma_pch_preprocess (&tparser);
/* Only after any PCH load do we move the parser into GC-managed
   storage, copying over whatever state the stack parser gathered.  */
the_parser = ggc_alloc_c_parser ();
*the_parser = tparser;
/* Initialize EH, if we've been told to do so. */
if (flag_exceptions)
using_eh_for_cleanups ();
c_parser_translation_unit (the_parser);
the_parser = NULL;
}
#include "gt-c-parser.h"
/* -*- mode: C++; indent-tabs-mode: nil; -*-
*
*
* This file has been adapted by Nicolas Bonneel (2013),
* from network_simplex.h from LEMON, a generic C++ optimization library,
* to implement a lightweight network simplex for mass transport, more
* memory efficient than the original file. A previous version of this file
* is used as part of the Displacement Interpolation project,
* Web: http://www.cs.ubc.ca/labs/imager/tr/2011/DisplacementInterpolation/
*
* Revisions:
* March 2015: added OpenMP parallelization
* March 2017: included Antoine Rolet's trick to make it more robust
* April 2018: IMPORTANT bug fix + uses 64bit integers (slightly slower but less risks of overflows), updated to a newer version of the algo by LEMON, sparse flow by default + minor edits.
*
*
**** Original file Copyright Notice :
*
* Copyright (C) 2003-2010
* Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
* (Egervary Research Group on Combinatorial Optimization, EGRES).
*
* Permission to use, modify and distribute this software is granted
* provided that this copyright notice appears in all copies. For
* precise terms see the accompanying LICENSE file.
*
* This software is provided "AS IS" with no warranty of any kind,
* express or implied, and with no claim as to its suitability for any
* purpose.
*
*/
#ifndef LEMON_NETWORK_SIMPLEX_SIMPLE_H
#define LEMON_NETWORK_SIMPLEX_SIMPLE_H
/// \ingroup min_cost_flow_algs
///
/// \file
/// \brief Network Simplex algorithm for finding a minimum cost flow.
// if your compiler has troubles with unorderedmaps, just comment the following line to use a slower std::map instead
#define HASHMAP // now handled with unorderedmaps instead of stdext::hash_map. Should be better supported.
#define SPARSE_FLOW // a sparse flow vector will be 10-15% slower for small problems but uses less memory and becomes faster for large problems (40k total nodes)
#include <vector>
#include <limits>
#include <algorithm>
#ifdef HASHMAP
#include <unordered_map>
#else
#include <map>
#endif
//#include "core.h"
//#include "lmath.h"
#include <omp.h>
#include <cmath>
//#include "sparse_array_n.h"
#include "full_bipartitegraph.h"
#define INVALIDNODE -1
#define INVALID (-1)
namespace lemon {
template <typename T>
class ProxyObject;

// A sparse vector: only non-zero entries are kept in a map, so memory use
// scales with the number of non-zeros rather than the logical size.
// Reads of absent entries yield 0.  Mutable access goes through
// ProxyObject so writes can preserve the invariant "absent == zero".
template<typename T>
class SparseValueVector
{
public:
	// The size parameter exists only for drop-in compatibility with
	// std::vector; no storage is reserved up front.
	SparseValueVector(size_t /*n*/ = 0)
	{
	}
	void resize(size_t /*n*/ = 0) {}

	// Read-only access: entries not present in the map read as zero.
	T operator[](const size_t id) const
	{
#ifdef HASHMAP
		typename std::unordered_map<size_t, T>::const_iterator it = data.find(id);
#else
		typename std::map<size_t, T>::const_iterator it = data.find(id);
#endif
		if (it == data.end())
			return 0;
		else
			return it->second;
	}

	// Mutable access returns a proxy so that reads and writes through
	// v[i] can be intercepted and zeros kept out of the map.
	ProxyObject<T> operator[](const size_t id)
	{
		return ProxyObject<T>(this, id);
	}

	//private:
#ifdef HASHMAP
	std::unordered_map<size_t, T> data;
#else
	std::map<size_t, T> data;
#endif
};

// Proxy returned by SparseValueVector::operator[]; forwards reads and
// writes to the owning vector's map while maintaining sparsity.
template <typename T>
class ProxyObject {
public:
	ProxyObject(SparseValueVector<T> *v, size_t idx) { _v = v; _idx = idx; }

	// Write access.  BUG FIX: the previous version silently ignored
	// assignments of zero, which left a stale non-zero value in the map
	// when the slot had been written before.  Assigning zero now erases
	// the entry, matching the erase-on-zero behavior of += and -= and
	// keeping the invariant that an absent entry means zero.
	ProxyObject<T> & operator=(const T &v) {
		if (v != 0)
			_v->data[_idx] = v;
		else
			_v->data.erase(_idx);
		return *this;
	}

	// Read access: absent entries read as zero.
	operator T() {
#ifdef HASHMAP
		typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx);
#else
		typename std::map<size_t, T>::iterator it = _v->data.find(_idx);
#endif
		if (it == _v->data.end())
			return 0;
		else
			return it->second;
	}

	// In-place add; the entry is erased if the sum becomes zero.
	void operator+=(T val)
	{
		if (val == 0) return;
#ifdef HASHMAP
		typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx);
#else
		typename std::map<size_t, T>::iterator it = _v->data.find(_idx);
#endif
		if (it == _v->data.end())
			_v->data[_idx] = val;
		else
		{
			T sum = it->second + val;
			if (sum == 0)
				_v->data.erase(it);
			else
				it->second = sum;
		}
	}

	// In-place subtract; the entry is erased if the difference becomes zero.
	void operator-=(T val)
	{
		if (val == 0) return;
#ifdef HASHMAP
		typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx);
#else
		typename std::map<size_t, T>::iterator it = _v->data.find(_idx);
#endif
		if (it == _v->data.end())
			_v->data[_idx] = -val;
		else
		{
			T sum = it->second - val;
			if (sum == 0)
				_v->data.erase(it);
			else
				it->second = sum;
		}
	}

	SparseValueVector<T> *_v;
	size_t _idx;
};
/// \addtogroup min_cost_flow_algs
/// @{
/// \brief Implementation of the primal Network Simplex algorithm
/// for finding a \ref min_cost_flow "minimum cost flow".
///
/// \ref NetworkSimplexSimple implements the primal Network Simplex algorithm
/// for finding a \ref min_cost_flow "minimum cost flow"
/// \ref amo93networkflows, \ref dantzig63linearprog,
/// \ref kellyoneill91netsimplex.
/// This algorithm is a highly efficient specialized version of the
/// linear programming simplex method directly for the minimum cost
/// flow problem.
///
/// In general, %NetworkSimplexSimple is the fastest implementation available
/// in LEMON for this problem.
/// Moreover, it supports both directions of the supply/demand inequality
/// constraints. For more information, see \ref SupplyType.
///
/// Most of the parameters of the problem (except for the digraph)
/// can be given using separate functions, and the algorithm can be
/// executed using the \ref run() function. If some parameters are not
/// specified, then default values will be used.
///
/// \tparam GR The digraph type the algorithm runs on.
/// \tparam V The number type used for flow amounts, capacity bounds
/// and supply values in the algorithm. By default, it is \c int.
/// \tparam C The number type used for costs and potentials in the
/// algorithm. By default, it is the same as \c V.
///
/// \warning Both number types must be signed and all input data must
/// be integer.
///
/// \note %NetworkSimplexSimple provides five different pivot rule
/// implementations, from which the most efficient one is used
/// by default. For more information, see \ref PivotRule.
template <typename GR, typename V = int, typename C = V, typename ArcsType = int64_t>
class NetworkSimplexSimple
{
public:
/// \brief Constructor.
///
/// The constructor of the class.
///
/// \param graph The digraph the algorithm runs on.
/// \param arc_mixing Indicate if the arcs have to be stored in a
/// mixed order in the internal data structure.
/// In special cases, it could lead to better overall performance,
/// but it is usually slower. Therefore it is disabled by default.
NetworkSimplexSimple(const GR& graph, bool arc_mixing, int nbnodes, ArcsType nb_arcs, size_t maxiters = 0) :
_graph(graph), //_arc_id(graph),
_arc_mixing(arc_mixing), _init_nb_nodes(nbnodes), _init_nb_arcs(nb_arcs),
// INF falls back to max() for integer Value types, which have no infinity.
MAX(std::numeric_limits<Value>::max()),
INF(std::numeric_limits<Value>::has_infinity ?
std::numeric_limits<Value>::infinity() : MAX)
{
// Reset data structures
reset();
// maxiters == 0 presumably means "no explicit iteration cap" --
// TODO confirm against how max_iter is consumed by run()/start().
max_iter = maxiters;
}
/// The type of the flow amounts, capacity bounds and supply values
typedef V Value;
/// The type of the arc costs
typedef C Cost;
public:
/// \brief Problem type constants for the \c run() function.
///
/// Enum type containing the problem type constants that can be
/// returned by the \ref run() function of the algorithm.
enum ProblemType {
/// The problem has no feasible solution (flow).
INFEASIBLE,
/// The problem has optimal solution (i.e. it is feasible and
/// bounded), and the algorithm has found optimal flow and node
/// potentials (primal and dual solutions).
OPTIMAL,
/// The objective function of the problem is unbounded, i.e.
/// there is a directed cycle having negative total cost and
/// infinite upper bound.
UNBOUNDED
};
/// \brief Constants for selecting the type of the supply constraints.
///
/// Enum type containing constants for selecting the supply type,
/// i.e. the direction of the inequalities in the supply/demand
/// constraints of the \ref min_cost_flow "minimum cost flow problem".
///
/// The default supply type is \c GEQ, the \c LEQ type can be
/// selected using \ref supplyType().
/// The equality form is a special case of both supply types.
enum SupplyType {
/// This option means that there are <em>"greater or equal"</em>
/// supply/demand constraints in the definition of the problem.
GEQ,
/// This option means that there are <em>"less or equal"</em>
/// supply/demand constraints in the definition of the problem.
LEQ
};
private:
size_t max_iter;
TEMPLATE_DIGRAPH_TYPEDEFS(GR);
typedef std::vector<int> IntVector;
typedef std::vector<ArcsType> ArcVector;
typedef std::vector<Value> ValueVector;
typedef std::vector<Cost> CostVector;
// typedef SparseValueVector<Cost> CostVector;
typedef std::vector<char> BoolVector;
// Note: vector<char> is used instead of vector<bool> for efficiency reasons
// State constants for arcs
enum ArcState {
STATE_UPPER = -1,
STATE_TREE = 0,
STATE_LOWER = 1
};
typedef std::vector<signed char> StateVector;
// Note: vector<signed char> is used instead of vector<ArcState> for
// efficiency reasons
private:
// Data related to the underlying digraph
const GR &_graph;
int _node_num;
ArcsType _arc_num;
ArcsType _all_arc_num;
ArcsType _search_arc_num;
// Parameters of the problem
SupplyType _stype;
Value _sum_supply;
inline int _node_id(int n) const { return _node_num - n - 1; };
//IntArcMap _arc_id;
IntVector _source; // keep nodes as integers
IntVector _target;
bool _arc_mixing;
// Node and arc data
CostVector _cost;
ValueVector _supply;
#ifdef SPARSE_FLOW
SparseValueVector<Value> _flow;
#else
ValueVector _flow;
#endif
CostVector _pi;
// Data for storing the spanning tree structure
IntVector _parent;
ArcVector _pred;
IntVector _thread;
IntVector _rev_thread;
IntVector _succ_num;
IntVector _last_succ;
IntVector _dirty_revs;
BoolVector _forward;
StateVector _state;
ArcsType _root;
// Temporary data used in the current pivot iteration
ArcsType in_arc, join, u_in, v_in, u_out, v_out;
ArcsType first, second, right, last;
ArcsType stem, par_stem, new_stem;
Value delta;
const Value MAX;
ArcsType mixingCoeff;
public:
/// \brief Constant for infinite upper bounds (capacities).
///
/// Constant for infinite upper bounds (capacities).
/// It is \c std::numeric_limits<Value>::infinity() if available,
/// \c std::numeric_limits<Value>::max() otherwise.
const Value INF;
private:
// thank you to DVK and MizardX from StackOverflow for this function!
// Maps a linear arc index K to its position in the "mixed" arc ordering
// (used when _arc_mixing is enabled).  The permutation interleaves arcs
// in subsequences of stride mixingCoeff; indexes past
// num_total_big_subsequence_numbers fall into the shorter subsequences
// (smallv == 1 selects that branch without an if).
inline ArcsType sequence(ArcsType k) const {
ArcsType smallv = (k > num_total_big_subsequence_numbers) & 1;
k -= num_total_big_subsequence_numbers * smallv;
ArcsType subsequence_length2 = subsequence_length - smallv;
ArcsType subsequence_num = (k / subsequence_length2) + num_big_subsequences * smallv;
ArcsType subsequence_offset = (k % subsequence_length2) * mixingCoeff;
return subsequence_offset + subsequence_num;
}
ArcsType subsequence_length;
ArcsType num_big_subsequences;
ArcsType num_total_big_subsequence_numbers;
// Translate a graph Arc into this class's internal arc index.  Arcs are
// stored in reverse graph-id order; when arc mixing is on, the index is
// additionally permuted by sequence().
inline ArcsType getArcID(const Arc &arc) const
{
//int n = _arc_num-arc._id-1;
ArcsType n = _arc_num - GR::id(arc) - 1;
//ArcsType a = mixingCoeff*(n%mixingCoeff) + n/mixingCoeff;
//ArcsType b = _arc_id[arc];
if (_arc_mixing)
return sequence(n);
else
return n;
}
// finally unused because too slow
// Recovers the source node id of an internal arc index without the
// _source lookup table.  Inverts the reverse-id (and, if enabled, the
// mixing) transformation; negative n means the index refers to one of
// the artificial arcs appended past _arc_num -- TODO confirm the
// artificial-arc branch against how arcs are laid out in the ctor/reset.
inline ArcsType getSource(const ArcsType arc) const
{
//ArcsType a = _source[arc];
//return a;
ArcsType n = _arc_num - arc - 1;
if (_arc_mixing)
n = mixingCoeff*(n%mixingCoeff) + n / mixingCoeff;
ArcsType b;
if (n >= 0)
b = _node_id(_graph.source(GR::arcFromId(n)));
else
{
n = arc + 1 - _arc_num;
if (n <= _node_num)
b = _node_num;
else
if (n >= _graph._n1)
b = _graph._n1;
else
b = _graph._n1 - n;
}
return b;
}
// Implementation of the Block Search pivot rule
// Scans the arcs in fixed-size blocks, starting where the previous call
// left off (_next_arc), and picks the arc with the most negative reduced
// cost found so far.  The scan of each block is parallelized with OpenMP,
// one partial minimum per thread, then reduced serially.
class BlockSearchPivotRule
{
private:
// References to the NetworkSimplexSimple class
const IntVector &_source;
const IntVector &_target;
const CostVector &_cost;
const StateVector &_state;
const CostVector &_pi;
ArcsType &_in_arc;
ArcsType _search_arc_num;
// Pivot rule data
ArcsType _block_size;
ArcsType _next_arc;
NetworkSimplexSimple &_ns;
public:
// Constructor
BlockSearchPivotRule(NetworkSimplexSimple &ns) :
_source(ns._source), _target(ns._target),
_cost(ns._cost), _state(ns._state), _pi(ns._pi),
_in_arc(ns.in_arc), _search_arc_num(ns._search_arc_num),
_next_arc(0), _ns(ns)
{
// The main parameters of the pivot rule
// Block size grows with sqrt(#arcs), but never below MIN_BLOCK_SIZE.
const double BLOCK_SIZE_FACTOR = 1;
const ArcsType MIN_BLOCK_SIZE = 10;
_block_size = std::max(ArcsType(BLOCK_SIZE_FACTOR * std::sqrt(double(_search_arc_num))), MIN_BLOCK_SIZE);
}
// Find next entering arc
// Returns true and sets _in_arc (via the reference into the enclosing
// class) when an arc with sufficiently negative reduced cost is found;
// returns false when the current solution looks optimal.  The
// epsilon*a threshold is Antoine Rolet's guard against floating-point
// noise around zero.
bool findEnteringArc() {
Cost min_val = 0;
// One partial minimum per OpenMP thread; reduced serially below.
ArcsType N = omp_get_max_threads();
std::vector<Cost> minArray(N, 0);
std::vector<ArcsType> arcId(N);
ArcsType bs = (ArcsType)ceil(_block_size / (double)N);
for (ArcsType i = 0; i < _search_arc_num; i += _block_size) {
ArcsType e;
ArcsType j;
#pragma omp parallel
{
int t = omp_get_thread_num();
// lastprivate(e): after the region, e holds the value from the
// final loop iteration, i.e. the last arc examined in this block.
#pragma omp for schedule(static, bs) lastprivate(e)
for (j = 0; j < std::min(i + _block_size, _search_arc_num) - i; j++) {
// Arc indices wrap around _search_arc_num so the scan resumes
// where the previous call stopped.
e = (_next_arc + i + j); if (e >= _search_arc_num) e -= _search_arc_num;
// Reduced cost of arc e; _state gives the sign convention for
// arcs at their lower/upper bound.
Cost c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
if (c < minArray[t]) {
minArray[t] = c;
arcId[t] = e;
}
}
}
// Serial reduction of the per-thread minima.
for (int j = 0; j < N; j++) {
if (minArray[j] < min_val) {
min_val = minArray[j];
_in_arc = arcId[j];
}
}
// NOTE(review): this scale factor uses std::abs while the post-loop
// copy below uses fabs -- presumably equivalent for floating Cost
// types, but inconsistent; confirm for integral Cost.
Cost a = std::abs(_pi[_source[_in_arc]]) > std::abs(_pi[_target[_in_arc]]) ? std::abs(_pi[_source[_in_arc]]) : std::abs(_pi[_target[_in_arc]]);
a = a > std::abs(_cost[_in_arc]) ? a : std::abs(_cost[_in_arc]);
if (min_val < -std::numeric_limits<Cost>::epsilon()*a) {
_next_arc = e;
return true;
}
}
// Whole arc set scanned without an acceptable candidate: declare
// optimality unless the best value still clears the noise threshold.
Cost a = fabs(_pi[_source[_in_arc]]) > fabs(_pi[_target[_in_arc]]) ? fabs(_pi[_source[_in_arc]]) : fabs(_pi[_target[_in_arc]]);
a = a > fabs(_cost[_in_arc]) ? a : fabs(_cost[_in_arc]);
if (min_val >= -std::numeric_limits<Cost>::epsilon()*a) return false;
return true;
}
// Find next entering arc
/*bool findEnteringArc() {
Cost min_val = 0;
int N = omp_get_max_threads();
std::vector<Cost> minArray(N);
std::vector<ArcsType> arcId(N);
ArcsType bs = (ArcsType)ceil(_block_size / (double)N);
for (ArcsType i = 0; i < _search_arc_num; i += _block_size) {
ArcsType maxJ = std::min(i + _block_size, _search_arc_num) - i;
ArcsType j;
#pragma omp parallel
{
int t = omp_get_thread_num();
Cost minV = 0;
ArcsType arcStart = _next_arc + i;
ArcsType arc = -1;
#pragma omp for schedule(static, bs)
for (j = 0; j < maxJ; j++) {
ArcsType e = arcStart + j; if (e >= _search_arc_num) e -= _search_arc_num;
Cost c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
if (c < minV) {
minV = c;
arc = e;
}
}
minArray[t] = minV;
arcId[t] = arc;
}
for (int j = 0; j < N; j++) {
if (minArray[j] < min_val) {
min_val = minArray[j];
_in_arc = arcId[j];
}
}
//FIX by Antoine Rolet to avoid precision issues
Cost a = std::max(std::abs(_cost[_in_arc]), std::max(std::abs(_pi[_source[_in_arc]]), std::abs(_pi[_target[_in_arc]])));
if (min_val <-std::numeric_limits<Cost>::epsilon()*a) {
_next_arc = _next_arc + i + maxJ - 1;
if (_next_arc >= _search_arc_num) _next_arc -= _search_arc_num;
return true;
}
}
if (min_val >= 0) {
return false;
}
return true;
}*/
/*bool findEnteringArc() {
Cost c, min = 0;
int cnt = _block_size;
int e, min_arc = _next_arc;
for (e = _next_arc; e < _search_arc_num; ++e) {
c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
if (c < min) {
min = c;
min_arc = e;
}
if (--cnt == 0) {
if (min < 0) break;
cnt = _block_size;
}
}
if (min == 0 || cnt > 0) {
for (e = 0; e < _next_arc; ++e) {
c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
if (c < min) {
min = c;
min_arc = e;
}
if (--cnt == 0) {
if (min < 0) break;
cnt = _block_size;
}
}
}
if (min >= 0) return false;
_in_arc = min_arc;
_next_arc = e;
return true;
}*/
}; //class BlockSearchPivotRule
public:
int _init_nb_nodes;
ArcsType _init_nb_arcs;
/// \name Parameters
/// The parameters of the algorithm can be specified using these
/// functions.
/// @{
/// \brief Set the costs of the arcs.
///
/// This function sets the costs of the arcs.
/// If it is not used before calling \ref run(), the costs
/// will be set to \c 1 on all arcs.
///
/// \param map An arc map storing the costs.
/// Its \c Value type must be convertible to the \c Cost type
/// of the algorithm.
///
/// \return <tt>(*this)</tt>
/// Copy the per-arc cost values from \c map into the internal cost vector.
/// \return <tt>(*this)</tt> to allow call chaining.
template<typename CostMap>
NetworkSimplexSimple& costMap(const CostMap& map) {
    Arc arc;
    for (_graph.first(arc); arc != INVALID; _graph.next(arc))
        _cost[getArcID(arc)] = map[arc];
    return *this;
}
/// \brief Set the cost of one arc.
///
/// This function sets the cost of a single arc.
/// It is provided so callers can avoid building a full cost map (memory).
///
/// \param arc An arc.
/// \param cost The cost value to assign to \c arc.
///
/// \return <tt>(*this)</tt>
template<typename Value>
NetworkSimplexSimple& setCost(const Arc& arc, const Value cost) {
    // Translate the graph arc to its internal index, then store the cost.
    const ArcsType id = getArcID(arc);
    _cost[id] = cost;
    return *this;
}
/// \brief Set the supply values of the nodes.
///
/// This function sets the supply values of the nodes.
/// If neither this function nor \ref stSupply() is used before
/// calling \ref run(), the supply of each node will be set to zero.
///
/// \param map A node map storing the supply values.
/// Its \c Value type must be convertible to the \c Value type
/// of the algorithm.
///
/// \return <tt>(*this)</tt>
/// Copy the per-node supply values from \c map into the internal vector.
/// \return <tt>(*this)</tt> to allow call chaining.
template<typename SupplyMap>
NetworkSimplexSimple& supplyMap(const SupplyMap& map) {
    Node node;
    for (_graph.first(node); node != INVALIDNODE; _graph.next(node))
        _supply[_node_id(node)] = map[node];
    return *this;
}
/// Set node supplies from two separate arrays: nodes [0, n1) read their
/// supply from \c map1, all remaining nodes read from \c map2 (offset by n1).
/// \return <tt>(*this)</tt> to allow call chaining.
template<typename SupplyMap>
NetworkSimplexSimple& supplyMap(const SupplyMap* map1, int n1, const SupplyMap* map2, int n2) {
    Node node;
    for (_graph.first(node); node != INVALIDNODE; _graph.next(node)) {
        _supply[_node_id(node)] = (node < n1) ? map1[node] : map2[node - n1];
    }
    return *this;
}
/// Set node supplies from two constants: the first n1 nodes get \c val1,
/// every remaining node gets \c val2.
/// \return <tt>(*this)</tt> to allow call chaining.
template<typename SupplyMap>
NetworkSimplexSimple& supplyMapAll(SupplyMap val1, int n1, SupplyMap val2, int n2) {
    Node node;
    for (_graph.first(node); node != INVALIDNODE; _graph.next(node)) {
        _supply[_node_id(node)] = (node < n1) ? val1 : val2;
    }
    return *this;
}
/// \brief Set single source and target nodes and a supply value.
///
/// This function sets a single source node and a single target node
/// and the required flow value.
/// If neither this function nor \ref supplyMap() is used before
/// calling \ref run(), the supply of each node will be set to zero.
///
/// Using this function has the same effect as using \ref supplyMap()
/// with such a map in which \c k is assigned to \c s, \c -k is
/// assigned to \c t and all other nodes have zero supply value.
///
/// \param s The source node.
/// \param t The target node.
/// \param k The required amount of flow from node \c s to node \c t
/// (i.e. the supply of \c s and the demand of \c t).
///
/// \return <tt>(*this)</tt>
NetworkSimplexSimple& stSupply(const Node& s, const Node& t, Value k) {
    // Clear every node supply, then configure the single source/sink pair:
    // s supplies k units and t demands k units.
    for (int i = 0; i < _node_num; ++i)
        _supply[i] = 0;
    _supply[_node_id(s)] = k;
    _supply[_node_id(t)] = -k;
    return *this;
}
/// \brief Set the type of the supply constraints.
///
/// This function sets the type of the supply/demand constraints.
/// If it is not used before calling \ref run(), the \ref GEQ supply
/// type will be used.
///
/// For more information, see \ref SupplyType.
///
/// \return <tt>(*this)</tt>
NetworkSimplexSimple& supplyType(SupplyType supply_type) {
    // Record the requested GEQ/LEQ constraint type for the next run().
    this->_stype = supply_type;
    return *this;
}
/// @}
/// \name Execution Control
/// The algorithm can be executed using \ref run().
/// @{
/// \brief Run the algorithm.
///
/// This function runs the algorithm.
/// The paramters can be specified using functions \ref lowerMap(),
/// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(),
/// \ref supplyType().
/// For example,
/// \code
/// NetworkSimplexSimple<ListDigraph> ns(graph);
/// ns.lowerMap(lower).upperMap(upper).costMap(cost)
/// .supplyMap(sup).run();
/// \endcode
///
/// This function can be called more than once. All the given parameters
/// are kept for the next call, unless \ref resetParams() or \ref reset()
/// is used, thus only the modified parameters have to be set again.
/// If the underlying digraph was also modified after the construction
/// of the class (or the last \ref reset() call), then the \ref reset()
/// function must be called.
///
/// \param pivot_rule The pivot rule that will be used during the
/// algorithm. For more information, see \ref PivotRule.
///
/// \return \c INFEASIBLE if no feasible flow exists,
/// \n \c OPTIMAL if the problem has optimal solution
/// (i.e. it is feasible and bounded), and the algorithm has found
/// optimal flow and node potentials (primal and dual solutions),
/// \n \c UNBOUNDED if the objective function of the problem is
/// unbounded, i.e. there is a directed cycle having negative total
/// cost and infinite upper bound.
///
/// \see ProblemType, PivotRule
/// \see resetParams(), reset()
ProblemType run() {
    // init() builds the initial spanning-tree structure; when it reports an
    // infeasible instance, skip the simplex iterations entirely.
    return init() ? start() : INFEASIBLE;
}
/// \brief Reset all the parameters that have been given before.
///
/// This function resets all the paramaters that have been given
/// before using functions \ref lowerMap(), \ref upperMap(),
/// \ref costMap(), \ref supplyMap(), \ref stSupply(), \ref supplyType().
///
/// It is useful for multiple \ref run() calls. Basically, all the given
/// parameters are kept for the next \ref run() call, unless
/// \ref resetParams() or \ref reset() is used.
/// If the underlying digraph was also modified after the construction
/// of the class or the last \ref reset() call, then the \ref reset()
/// function must be used, otherwise \ref resetParams() is sufficient.
///
/// For example,
/// \code
/// NetworkSimplexSimple<ListDigraph> ns(graph);
///
/// // First run
/// ns.lowerMap(lower).upperMap(upper).costMap(cost)
/// .supplyMap(sup).run();
///
/// // Run again with modified cost map (resetParams() is not called,
/// // so only the cost map have to be set again)
/// cost[e] += 100;
/// ns.costMap(cost).run();
///
/// // Run again from scratch using resetParams()
/// // (the lower bounds will be set to zero on all arcs)
/// ns.resetParams();
/// ns.upperMap(capacity).costMap(cost)
/// .supplyMap(sup).run();
/// \endcode
///
/// \return <tt>(*this)</tt>
///
/// \see reset(), run()
NetworkSimplexSimple& resetParams() {
    // Restore the documented defaults: zero supplies, unit arc costs and the
    // GEQ supply constraint type.
    for (int u = 0; u < _node_num; ++u)
        _supply[u] = 0;
    for (ArcsType a = 0; a < _arc_num; ++a)
        _cost[a] = 1;
    _stype = GEQ;
    return *this;
}
/// \brief Reset the internal data structures and all the parameters
/// that have been given before.
///
/// This function resets the internal data structures and all the
/// paramaters that have been given before using functions \ref lowerMap(),
/// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(),
/// \ref supplyType().
///
/// It is useful for multiple \ref run() calls. Basically, all the given
/// parameters are kept for the next \ref run() call, unless
/// \ref resetParams() or \ref reset() is used.
/// If the underlying digraph was also modified after the construction
/// of the class or the last \ref reset() call, then the \ref reset()
/// function must be used, otherwise \ref resetParams() is sufficient.
///
/// See \ref resetParams() for examples.
///
/// \return <tt>(*this)</tt>
///
/// \see resetParams(), run()
// Rebuild all internal vectors from the stored graph dimensions and reload
// the arc endpoints (optionally in "mixed" order for better pivot-search
// locality), then restore the default parameters via resetParams().
NetworkSimplexSimple& reset() {
// Resize vectors
_node_num = _init_nb_nodes;
_arc_num = _init_nb_arcs;
// One extra slot for the artificial root node; up to 2*_node_num extra
// slots for artificial arcs added by init().
int all_node_num = _node_num + 1;
ArcsType max_arc_num = _arc_num + 2 * _node_num;
_source.resize(max_arc_num);
_target.resize(max_arc_num);
_cost.resize(max_arc_num);
_supply.resize(all_node_num);
_flow.resize(max_arc_num);
_pi.resize(all_node_num);
_parent.resize(all_node_num);
_pred.resize(all_node_num);
_forward.resize(all_node_num);
_thread.resize(all_node_num);
_rev_thread.resize(all_node_num);
_succ_num.resize(all_node_num);
_last_succ.resize(all_node_num);
_state.resize(max_arc_num);
//_arc_mixing=false;
if (_arc_mixing && _node_num > 1) {
// Store the arcs in a mixed order
//ArcsType k = std::max(ArcsType(std::sqrt(double(_arc_num))), ArcsType(10));
// Mixing parameters consumed by sequence() (defined outside this
// excerpt) to interleave arc ids into subsequences.
const ArcsType k = std::max(ArcsType(_arc_num / _node_num), ArcsType(3));
mixingCoeff = k;
subsequence_length = _arc_num / mixingCoeff + 1;
num_big_subsequences = _arc_num % mixingCoeff;
num_total_big_subsequence_numbers = subsequence_length * num_big_subsequences;
#pragma omp parallel for schedule(static)
for (Arc a = 0; a <= _graph.maxArcId(); a++) { // --a <=> _graph.next(a) , -1 == INVALID
ArcsType i = sequence(_graph.maxArcId()-a);
_source[i] = _node_id(_graph.source(a));
_target[i] = _node_id(_graph.target(a));
}
} else {
// Store the arcs in the original order
ArcsType i = 0;
Arc a; _graph.first(a);
for (; a != INVALID; _graph.next(a), ++i) {
_source[i] = _node_id(_graph.source(a));
_target[i] = _node_id(_graph.target(a));
//_arc_id[a] = i;
}
}
// Reset parameters
resetParams();
return *this;
}
/// @}
/// \name Query Functions
/// The results of the algorithm can be obtained using these
/// functions.\n
/// The \ref run() function must be called before using them.
/// @{
/// \brief Return the total cost of the found flow.
///
/// This function returns the total cost of the found flow.
/// Its complexity is O(e).
///
/// \note The return type of the function can be specified as a
/// template parameter. For example,
/// \code
/// ns.totalCost<double>();
/// \endcode
/// It is useful if the total cost cannot be stored in the \c Cost
/// type of the algorithm, which is the default return type of the
/// function.
///
/// \pre \ref run() must be called before using this function.
/*template <typename Number>
Number totalCost() const {
Number c = 0;
for (ArcIt a(_graph); a != INVALID; ++a) {
int i = getArcID(a);
c += Number(_flow[i]) * Number(_cost[i]);
}
return c;
}*/
// Sum of flow[i] * cost[i] over all arcs, accumulated in the caller-chosen
// Number type. With SPARSE_FLOW the flow container only stores nonzero
// entries, so only those are visited.
template <typename Number>
Number totalCost() const {
Number c = 0;
#ifdef SPARSE_FLOW
// Sparse representation: iterate only over stored (index, value) pairs.
#ifdef HASHMAP
typename std::unordered_map<size_t, Value>::const_iterator it;
#else
typename std::map<size_t, Value>::const_iterator it;
#endif
for (it = _flow.data.begin(); it!=_flow.data.end(); ++it)
c += Number(it->second) * Number(_cost[it->first]);
return c;
#else
// Dense representation: every arc (including artificial ones) is visited;
// zero-flow entries contribute nothing.
for (ArcsType i = 0; i<_flow.size(); i++)
c += _flow[i] * Number(_cost[i]);
return c;
#endif
}
#ifndef DOXYGEN
// Convenience overload returning the total cost in the algorithm's own
// Cost type (hidden from doxygen to avoid duplicate documentation).
Cost totalCost() const {
return totalCost<Cost>();
}
#endif
/// \brief Return the flow on the given arc.
///
/// This function returns the flow on the given arc.
///
/// \pre \ref run() must be called before using this function.
Value flow(const Arc& a) const {
// Primal solution lookup: flow value stored for this arc by run().
return _flow[getArcID(a)];
}
/// \brief Return the flow map (the primal solution).
///
/// This function copies the flow value on each arc into the given
/// map. The \c Value type of the algorithm must be convertible to
/// the \c Value type of the map.
///
/// \pre \ref run() must be called before using this function.
template <typename FlowMap>
void flowMap(FlowMap &map) const {
Arc a; _graph.first(a);
for (; a != INVALID; _graph.next(a)) {
map.set(a, _flow[getArcID(a)]);
}
}
/// \brief Return the potential (dual value) of the given node.
///
/// This function returns the potential (dual value) of the
/// given node.
///
/// \pre \ref run() must be called before using this function.
Cost potential(const Node& n) const {
// Dual solution lookup: node potential computed by the last run().
return _pi[_node_id(n)];
}
/// \brief Return the potential map (the dual solution).
///
/// This function copies the potential (dual value) of each node
/// into the given map.
/// The \c Cost type of the algorithm must be convertible to the
/// \c Value type of the map.
///
/// \pre \ref run() must be called before using this function.
template <typename PotentialMap>
void potentialMap(PotentialMap &map) const {
Node n; _graph.first(n);
for (; n != INVALID; _graph.next(n)) {
map.set(n, _pi[_node_id(n)]);
}
}
/// @}
private:
// Initialize internal data structures
// Build the initial basic solution: an artificial root node connected to
// every real node by an artificial arc, forming the starting spanning tree.
// The three branches handle the EQ / LEQ / GEQ supply cases. Returns false
// only for an empty graph.
bool init() {
if (_node_num == 0) return false;
// Check the sum of supply values
_sum_supply = 0;
for (int i = 0; i != _node_num; ++i) {
_sum_supply += _supply[i];
}
/*if (!((_stype == GEQ && _sum_supply <= 0) ||
(_stype == LEQ && _sum_supply >= 0))) return false;*/
// Initialize artifical cost
// ART_COST must dominate any achievable path cost so artificial arcs are
// driven out of the basis whenever a feasible solution exists.
Cost ART_COST;
if (std::numeric_limits<Cost>::is_exact) {
ART_COST = std::numeric_limits<Cost>::max() / 2 + 1;
} else {
ART_COST = 0;
for (ArcsType i = 0; i != _arc_num; ++i) {
if (_cost[i] > ART_COST) ART_COST = _cost[i];
}
ART_COST = (ART_COST + 1) * _node_num;
}
// Initialize arc maps
for (ArcsType i = 0; i != _arc_num; ++i) {
#ifndef SPARSE_FLOW
_flow[i] = 0; //by default, the sparse matrix is empty
#endif
_state[i] = STATE_LOWER;
}
#ifdef SPARSE_FLOW
_flow = SparseValueVector<Value>();
#endif
// Set data for the artificial root node
_root = _node_num;
_parent[_root] = -1;
_pred[_root] = -1;
_thread[_root] = 0;
_rev_thread[0] = _root;
_succ_num[_root] = _node_num + 1;
_last_succ[_root] = _root - 1;
_supply[_root] = -_sum_supply;
_pi[_root] = 0;
// Add artificial arcs and initialize the spanning tree data structure
if (_sum_supply == 0) {
// EQ supply constraints
// One artificial arc per node, all of them in the initial tree.
_search_arc_num = _arc_num;
_all_arc_num = _arc_num + _node_num;
for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) {
_parent[u] = _root;
_pred[u] = e;
_thread[u] = u + 1;
_rev_thread[u + 1] = u;
_succ_num[u] = 1;
_last_succ[u] = u;
_state[e] = STATE_TREE;
if (_supply[u] >= 0) {
// Supply node: artificial arc carries the supply toward the root.
_forward[u] = true;
_pi[u] = 0;
_source[e] = u;
_target[e] = _root;
_flow[e] = _supply[u];
_cost[e] = 0;
} else {
// Demand node: artificial arc carries flow from the root.
_forward[u] = false;
_pi[u] = ART_COST;
_source[e] = _root;
_target[e] = u;
_flow[e] = -_supply[u];
_cost[e] = ART_COST;
}
}
} else if (_sum_supply > 0) {
// LEQ supply constraints
// Demand nodes get an extra penalized artificial arc (index f) in the
// tree, plus a free non-tree arc (index e) that the search may use.
_search_arc_num = _arc_num + _node_num;
ArcsType f = _arc_num + _node_num;
for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) {
_parent[u] = _root;
_thread[u] = u + 1;
_rev_thread[u + 1] = u;
_succ_num[u] = 1;
_last_succ[u] = u;
if (_supply[u] >= 0) {
_forward[u] = true;
_pi[u] = 0;
_pred[u] = e;
_source[e] = u;
_target[e] = _root;
_flow[e] = _supply[u];
_cost[e] = 0;
_state[e] = STATE_TREE;
} else {
_forward[u] = false;
_pi[u] = ART_COST;
_pred[u] = f;
_source[f] = _root;
_target[f] = u;
_flow[f] = -_supply[u];
_cost[f] = ART_COST;
_state[f] = STATE_TREE;
_source[e] = u;
_target[e] = _root;
//_flow[e] = 0; //by default, the sparse matrix is empty
_cost[e] = 0;
_state[e] = STATE_LOWER;
++f;
}
}
_all_arc_num = f;
} else {
// GEQ supply constraints
// Mirror image of the LEQ case: supply nodes get the penalized tree
// arc, demand nodes get the free one.
_search_arc_num = _arc_num + _node_num;
ArcsType f = _arc_num + _node_num;
for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) {
_parent[u] = _root;
_thread[u] = u + 1;
_rev_thread[u + 1] = u;
_succ_num[u] = 1;
_last_succ[u] = u;
if (_supply[u] <= 0) {
_forward[u] = false;
_pi[u] = 0;
_pred[u] = e;
_source[e] = _root;
_target[e] = u;
_flow[e] = -_supply[u];
_cost[e] = 0;
_state[e] = STATE_TREE;
} else {
_forward[u] = true;
_pi[u] = -ART_COST;
_pred[u] = f;
_source[f] = u;
_target[f] = _root;
_flow[f] = _supply[u];
_state[f] = STATE_TREE;
_cost[f] = ART_COST;
_source[e] = _root;
_target[e] = u;
//_flow[e] = 0; //by default, the sparse matrix is empty
_cost[e] = 0;
_state[e] = STATE_LOWER;
++f;
}
}
_all_arc_num = f;
}
return true;
}
// Find the join node
// Walk the two endpoints of the entering arc up the spanning tree until they
// meet; the meeting point (stored in `join`) is the apex of the pivot cycle.
void findJoinNode() {
    int side_u = _source[in_arc];
    int side_v = _target[in_arc];
    while (side_u != side_v) {
        // At each step, lift the endpoint with the smaller successor count.
        if (_succ_num[side_u] < _succ_num[side_v])
            side_u = _parent[side_u];
        else
            side_v = _parent[side_v];
    }
    join = side_u;
}
// Find the leaving arc of the cycle and returns true if the
// leaving arc is not the same as the entering arc
// Determine the leaving arc of the pivot cycle and the maximum augmentation
// delta along it. Sets delta, u_out, u_in and v_in; returns true when the
// leaving arc differs from the entering arc (result != 0).
bool findLeavingArc() {
// Initialize first and second nodes according to the direction
// of the cycle
if (_state[in_arc] == STATE_LOWER) {
first = _source[in_arc];
second = _target[in_arc];
} else {
first = _target[in_arc];
second = _source[in_arc];
}
delta = INF;
char result = 0;
Value d;
ArcsType e;
// Search the cycle along the path form the first node to the root
// Forward tree arcs bound the augmentation by their current flow.
for (int u = first; u != join; u = _parent[u]) {
e = _pred[u];
d = _forward[u] ? _flow[e] : INF;
if (d < delta) {
delta = d;
u_out = u;
result = 1;
}
}
// Search the cycle along the path form the second node to the root
// Note the <= here (vs < above): on ties the second path wins, which
// determines the u_in/v_in orientation chosen below.
for (int u = second; u != join; u = _parent[u]) {
e = _pred[u];
d = _forward[u] ? INF : _flow[e];
if (d <= delta) {
delta = d;
u_out = u;
result = 2;
}
}
if (result == 1) {
u_in = first;
v_in = second;
} else {
u_in = second;
v_in = first;
}
return result != 0;
}
// Change _flow and _state vectors
// Push delta units of flow around the pivot cycle and update the arc states.
// `change` is true when the leaving arc differs from the entering arc (i.e.
// the tree actually changes).
void changeFlow(bool change) {
// Augment along the cycle
if (delta > 0) {
// _state[in_arc] (+1/-1) gives the augmentation its sign.
Value val = _state[in_arc] * delta;
_flow[in_arc] += val;
// Walk both cycle halves up to the join node, adjusting each tree arc
// according to its orientation relative to the cycle direction.
for (int u = _source[in_arc]; u != join; u = _parent[u]) {
_flow[_pred[u]] += _forward[u] ? -val : val;
}
for (int u = _target[in_arc]; u != join; u = _parent[u]) {
_flow[_pred[u]] += _forward[u] ? val : -val;
}
}
// Update the state of the entering and leaving arcs
if (change) {
_state[in_arc] = STATE_TREE;
// The leaving arc drops to whichever bound its final flow sits at.
_state[_pred[u_out]] =
(_flow[_pred[u_out]] == 0) ? STATE_LOWER : STATE_UPPER;
} else {
// Degenerate pivot: the entering arc just flips between its bounds.
_state[in_arc] = -_state[in_arc];
}
}
// Update the tree structure
// Re-root the subtree of u_out under v_in after a pivot: updates _parent,
// _pred, _forward, _thread/_rev_thread (preorder list), _succ_num and
// _last_succ. The statement order here is load-bearing; do not reorder.
void updateTreeStructure() {
int old_rev_thread = _rev_thread[u_out];
int old_succ_num = _succ_num[u_out];
int old_last_succ = _last_succ[u_out];
v_out = _parent[u_out];
// Check if u_in and u_out coincide
if (u_in == u_out) {
// Update _parent, _pred, _pred_dir
_parent[u_in] = v_in;
_pred[u_in] = in_arc;
_forward[u_in] = (u_in == _source[in_arc]);
// Update _thread and _rev_thread
// Splice the (unchanged) subtree of u_out out of the preorder list and
// re-insert it immediately after v_in.
if (_thread[v_in] != u_out) {
ArcsType after = _thread[old_last_succ];
_thread[old_rev_thread] = after;
_rev_thread[after] = old_rev_thread;
after = _thread[v_in];
_thread[v_in] = u_out;
_rev_thread[u_out] = v_in;
_thread[old_last_succ] = after;
_rev_thread[after] = old_last_succ;
}
} else {
// Handle the case when old_rev_thread equals to v_in
// (it also means that join and v_out coincide)
int thread_continue = old_rev_thread == v_in ?
_thread[old_last_succ] : _thread[v_in];
// Update _thread and _parent along the stem nodes (i.e. the nodes
// between u_in and u_out, whose parent have to be changed)
int stem = u_in; // the current stem node
int par_stem = v_in; // the new parent of stem
int next_stem; // the next stem node
int last = _last_succ[u_in]; // the last successor of stem
int before, after = _thread[last];
_thread[v_in] = u_in;
// _dirty_revs collects nodes whose _rev_thread entry must be recomputed
// once all _thread updates are in place.
_dirty_revs.clear();
_dirty_revs.push_back(v_in);
while (stem != u_out) {
// Insert the next stem node into the thread list
next_stem = _parent[stem];
_thread[last] = next_stem;
_dirty_revs.push_back(last);
// Remove the subtree of stem from the thread list
before = _rev_thread[stem];
_thread[before] = after;
_rev_thread[after] = before;
// Change the parent node and shift stem nodes
_parent[stem] = par_stem;
par_stem = stem;
stem = next_stem;
// Update last and after
last = _last_succ[stem] == _last_succ[par_stem] ?
_rev_thread[par_stem] : _last_succ[stem];
after = _thread[last];
}
_parent[u_out] = par_stem;
_thread[last] = thread_continue;
_rev_thread[thread_continue] = last;
_last_succ[u_out] = last;
// Remove the subtree of u_out from the thread list except for
// the case when old_rev_thread equals to v_in
if (old_rev_thread != v_in) {
_thread[old_rev_thread] = after;
_rev_thread[after] = old_rev_thread;
}
// Update _rev_thread using the new _thread values
for (int i = 0; i != int(_dirty_revs.size()); ++i) {
int u = _dirty_revs[i];
_rev_thread[_thread[u]] = u;
}
// Update _pred, _pred_dir, _last_succ and _succ_num for the
// stem nodes from u_out to u_in
// Each stem node inherits its old parent's arc with flipped direction.
int tmp_sc = 0, tmp_ls = _last_succ[u_out];
for (int u = u_out, p = _parent[u]; u != u_in; u = p, p = _parent[u]) {
_pred[u] = _pred[p];
_forward[u] = !_forward[p];
tmp_sc += _succ_num[u] - _succ_num[p];
_succ_num[u] = tmp_sc;
_last_succ[p] = tmp_ls;
}
_pred[u_in] = in_arc;
_forward[u_in] = (u_in == _source[in_arc]);
_succ_num[u_in] = old_succ_num;
}
// Update _last_succ from v_in towards the root
int up_limit_out = _last_succ[join] == v_in ? join : -1;
int last_succ_out = _last_succ[u_out];
for (int u = v_in; u != -1 && _last_succ[u] == v_in; u = _parent[u]) {
_last_succ[u] = last_succ_out;
}
// Update _last_succ from v_out towards the root
if (join != old_rev_thread && v_in != old_rev_thread) {
for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
u = _parent[u]) {
_last_succ[u] = old_rev_thread;
}
} else if (last_succ_out != old_last_succ) {
for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
u = _parent[u]) {
_last_succ[u] = last_succ_out;
}
}
// Update _succ_num from v_in to join
// The moved subtree adds old_succ_num descendants on one side...
for (int u = v_in; u != join; u = _parent[u]) {
_succ_num[u] += old_succ_num;
}
// Update _succ_num from v_out to join
// ...and removes them from the other.
for (int u = v_out; u != join; u = _parent[u]) {
_succ_num[u] -= old_succ_num;
}
}
void updatePotential() {
Cost sigma = _pi[v_in] - _pi[u_in] -
((_forward[u_in])?_cost[in_arc]:(-_cost[in_arc]));
int end = _thread[_last_succ[u_in]];
for (int u = u_in; u != end; u = _thread[u]) {
_pi[u] += sigma;
}
}
// Heuristic initial pivots
// Heuristic warm start: pick a set of promising arcs (cheapest arc into each
// demand node, or out of each supply node, or a reverse search path in the
// single source/sink case) and perform one pivot on each profitable one.
// Returns false if an unbounded direction is detected.
bool initialPivots() {
Value curr, total = 0;
std::vector<Node> supply_nodes, demand_nodes;
Node u; _graph.first(u);
for (; u != INVALIDNODE; _graph.next(u)) {
curr = _supply[_node_id(u)];
if (curr > 0) {
total += curr;
supply_nodes.push_back(u);
} else if (curr < 0) {
demand_nodes.push_back(u);
}
}
if (_sum_supply > 0) total -= _sum_supply;
if (total <= 0) return true;
ArcVector arc_vector;
if (_sum_supply >= 0) {
if (supply_nodes.size() == 1 && demand_nodes.size() == 1) {
// Perform a reverse graph search from the sink to the source
//typename GR::template NodeMap<bool> reached(_graph, false);
BoolVector reached(_node_num, false);
Node s = supply_nodes[0], t = demand_nodes[0];
std::vector<Node> stack;
reached[t] = true;
stack.push_back(t);
while (!stack.empty()) {
Node u, v = stack.back();
stack.pop_back();
if (v == s) break;
Arc a; _graph.firstIn(a, v);
for (; a != INVALID; _graph.nextIn(a)) {
if (reached[u = _graph.source(a)]) continue;
ArcsType j = getArcID(a);
arc_vector.push_back(j);
reached[u] = true;
stack.push_back(u);
}
}
} else {
arc_vector.resize(demand_nodes.size());
// Find the min. cost incomming arc for each demand node
// Each iteration writes a distinct slot, so the loop is parallel-safe.
#pragma omp parallel for
for (ArcsType i = 0; i < ArcsType(demand_nodes.size()); ++i) {
Node v = demand_nodes[i];
Cost min_cost = std::numeric_limits<Cost>::max();
Arc min_arc = INVALID;
Arc a; _graph.firstIn(a, v);
for (; a != INVALID; _graph.nextIn(a)) {
Cost c = _cost[getArcID(a)];
if (c < min_cost) {
min_cost = c;
min_arc = a;
}
}
arc_vector[i] = getArcID(min_arc);
}
// Drop nodes that had no incoming arc at all.
arc_vector.erase(std::remove(arc_vector.begin(), arc_vector.end(), INVALID), arc_vector.end());
}
} else {
arc_vector.resize(supply_nodes.size());
// Find the min. cost outgoing arc for each supply node
#pragma omp parallel for
for (int i = 0; i < int(supply_nodes.size()); ++i) {
Node u = supply_nodes[i];
Cost min_cost = std::numeric_limits<Cost>::max();
Arc min_arc = INVALID;
Arc a; _graph.firstOut(a, u);
for (; a != INVALID; _graph.nextOut(a)) {
Cost c = _cost[getArcID(a)];
if (c < min_cost) {
min_cost = c;
min_arc = a;
}
}
arc_vector[i] = getArcID(min_arc);
}
arc_vector.erase(std::remove(arc_vector.begin(), arc_vector.end(), INVALID), arc_vector.end());
}
// Perform heuristic initial pivots
for (ArcsType i = 0; i != ArcsType(arc_vector.size()); ++i) {
in_arc = arc_vector[i];
// Skip arcs whose reduced cost is not profitable.
if (_state[in_arc] * (_cost[in_arc] + _pi[_source[in_arc]] -
_pi[_target[in_arc]]) >= 0) continue;
findJoinNode();
bool change = findLeavingArc();
if (delta >= MAX) return false;
changeFlow(change);
if (change) {
updateTreeStructure();
updatePotential();
}
}
return true;
}
// Execute the algorithm
ProblemType start() {
// Default entry point: run with the (OpenMP) block search pivot rule.
return start<BlockSearchPivotRule>();
}
// Main simplex loop: repeatedly pick an entering arc with the given pivot
// rule and pivot until optimality, unboundedness, or the max_iter cap.
// Returns OPTIMAL / INFEASIBLE / UNBOUNDED.
template <typename PivotRuleImpl>
ProblemType start() {
PivotRuleImpl pivot(*this);
// Perform heuristic initial pivots
if (!initialPivots()) return UNBOUNDED;
size_t iter_number = 0;
// Execute the Network Simplex algorithm
while (pivot.findEnteringArc()) {
// max_iter <= 0 means "no iteration limit".
if ((iter_number <= max_iter&&max_iter > 0) || max_iter<=0) {
iter_number++;
findJoinNode();
bool change = findLeavingArc();
// Unbounded augmentation along the cycle.
if (delta >= MAX) return UNBOUNDED;
changeFlow(change);
if (change) {
updateTreeStructure();
updatePotential();
}
} else break;
}
// Check feasibility
// Any residual flow on an artificial arc means the problem is infeasible.
for (ArcsType e = _search_arc_num; e != _all_arc_num; ++e) {
if (_flow[e] != 0) return INFEASIBLE;
}
// Shift potentials to meet the requirements of the GEQ/LEQ type
// optimality conditions
if (_sum_supply == 0) {
if (_stype == GEQ) {
// GEQ requires non-positive potentials: subtract the maximum.
Cost max_pot = -std::numeric_limits<Cost>::max();
for (ArcsType i = 0; i != _node_num; ++i) {
if (_pi[i] > max_pot) max_pot = _pi[i];
}
if (max_pot > 0) {
for (ArcsType i = 0; i != _node_num; ++i)
_pi[i] -= max_pot;
}
} else {
// LEQ requires non-negative potentials: subtract the minimum.
Cost min_pot = std::numeric_limits<Cost>::max();
for (ArcsType i = 0; i != _node_num; ++i) {
if (_pi[i] < min_pot) min_pot = _pi[i];
}
if (min_pot < 0) {
for (ArcsType i = 0; i != _node_num; ++i)
_pi[i] -= min_pot;
}
}
}
return OPTIMAL;
}
}; //class NetworkSimplexSimple
///@}
} //namespace lemon
#endif //LEMON_NETWORK_SIMPLEX_H
|
GB_unop__bnot_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__bnot_int32_int32)
// op(A') function: GB (_unop_tran__bnot_int32_int32)
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = ~(aij)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = ~Ax: apply the BNOT (bitwise complement) unary operator entrywise.
// This file is auto-generated (see the header note); comments only.
GrB_Info GB (_unop_apply__bnot_int32_int32)
(
int32_t *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = ~(z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
// only positions with Ab [p] set hold a valid entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = ~(z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = ~(A'): transpose A and apply BNOT; the generic transpose loop is
// textually included and driven by the GB_* macros defined above.
GrB_Info GB (_unop_tran__bnot_int32_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ParallelVertexFilter.h | /**
* @file
* This file is part of PUMGen
*
* For conditions of distribution and use, please see the copyright
* notice in the file 'COPYING' at the root directory of this package
* and the copyright notice at https://github.com/SeisSol/PUMGen
*
* @copyright 2017 Technical University of Munich
* @author Sebastian Rettenberger <sebastian.rettenberger@tum.de>
*
* @remark This class is taken from XdmfWriter
* (https://github.com/TUM-I5/XdmfWriter)
*/
#ifndef PARALLEL_VERTEX_FILTER_H
#define PARALLEL_VERTEX_FILTER_H
#include <mpi.h>
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>
#include "utils/logger.h"
/**
* Filters duplicate vertices in parallel
*/
class ParallelVertexFilter {
private:
/**
* Compares 3D-vertex indices according to the vertices
*/
/**
 * Compares 3D-vertex indices according to the vertices.
 *
 * Indices i and j refer to vertices stored as consecutive (x, y, z) triples
 * in the array given to the constructor; ordering is lexicographic on
 * (x, y, z). operator() is const so the comparator can be passed by
 * const reference (e.g. to standard algorithms).
 */
class IndexedVertexComparator {
private:
/** Flat coordinate array, 3 doubles per vertex (not owned) */
const double* m_vertices;
public:
IndexedVertexComparator(const double* vertices) : m_vertices(vertices) {}
/** @return true iff vertex i orders strictly before vertex j */
bool operator()(unsigned int i, unsigned int j) const {
const double* a = m_vertices + i * 3;
const double* b = m_vertices + j * 3;
// The != chain is equivalent to the original ||/&& formulation,
// including its handling of equal coordinate prefixes.
if (a[0] != b[0])
return a[0] < b[0];
if (a[1] != b[1])
return a[1] < b[1];
return a[2] < b[2];
}
};
private:
/** The communicator we use */
MPI_Comm m_comm;
/** Our rank */
int m_rank;
/** #Processes */
int m_numProcs;
/** Global id after filtering */
unsigned long* m_globalIds;
/** Number of local vertices after filtering */
unsigned int m_numLocalVertices;
/** Local vertices after filtering */
double* m_localVertices;
public:
// Construct a filter on the given communicator; caches rank/size and lazily
// creates the shared MPI datatype for one (x, y, z) vertex.
ParallelVertexFilter(MPI_Comm comm = MPI_COMM_WORLD)
: m_comm(comm), m_globalIds(0L), m_numLocalVertices(0), m_localVertices(0L) {
MPI_Comm_rank(comm, &m_rank);
MPI_Comm_size(comm, &m_numProcs);
// vertexType is presumably a class-level static shared by all instances;
// its declaration is outside this excerpt -- TODO confirm.
if (vertexType == MPI_DATATYPE_NULL) {
MPI_Type_contiguous(3, MPI_DOUBLE, &vertexType);
MPI_Type_commit(&vertexType);
}
}
// Release the result arrays; delete[] on the initial null (0L) pointers is
// a no-op, so destroying an unused filter is safe.
virtual ~ParallelVertexFilter() {
delete[] m_globalIds;
delete[] m_localVertices;
}
/**
* @param vertices Vertices that should be filtered, must have the size
* <code>numVertices * 3</code>
*/
void filter(unsigned int numVertices, const double* vertices) {
// Chop the last 4 bits to avoid numerical errors
double* roundVertices = new double[numVertices * 3];
removeRoundError(vertices, numVertices * 3, roundVertices);
// Create indices and sort them locally
unsigned int* sortIndices = new unsigned int[numVertices];
createSortedIndices(roundVertices, numVertices, sortIndices);
// Select BUCKETS_PER_RANK-1 splitter elements
double localSplitters[BUCKETS_PER_RANK - 1];
#if 0 // Use omp only if we create a larger amount of buckets
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
#endif
for (int i = 0; i < BUCKETS_PER_RANK - 1; i++) {
unsigned long vrtxIndex = static_cast<unsigned long>(i) *
static_cast<unsigned long>(numVertices) /
static_cast<unsigned long>(BUCKETS_PER_RANK - 1);
assert(vrtxIndex < numVertices);
localSplitters[i] = roundVertices[sortIndices[vrtxIndex] * 3];
}
// Collect all splitter elements on rank 0
double* allSplitters = 0L;
if (m_rank == 0)
allSplitters = new double[m_numProcs * (BUCKETS_PER_RANK - 1)];
MPI_Gather(localSplitters, BUCKETS_PER_RANK - 1, MPI_DOUBLE, allSplitters, BUCKETS_PER_RANK - 1,
MPI_DOUBLE, 0, m_comm);
// Sort splitter elements
if (m_rank == 0)
std::sort(allSplitters, allSplitters + (m_numProcs * (BUCKETS_PER_RANK - 1)));
// Distribute splitter to all processes
double* splitters = new double[m_numProcs - 1];
if (m_rank == 0) {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (int i = 0; i < m_numProcs - 1; i++) {
unsigned long spltIndex = (i + 1) * (BUCKETS_PER_RANK - 1);
assert(spltIndex < static_cast<unsigned int>(m_numProcs * (BUCKETS_PER_RANK - 1)));
splitters[i] = allSplitters[spltIndex];
}
}
MPI_Bcast(splitters, m_numProcs - 1, MPI_DOUBLE, 0, m_comm);
delete[] allSplitters;
// Determine the bucket for each vertex
unsigned int* bucket = new unsigned int[numVertices];
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (unsigned int i = 0; i < numVertices; i++) {
double* ub = std::upper_bound(splitters, splitters + m_numProcs - 1, roundVertices[i * 3]);
bucket[i] = ub - splitters;
}
delete[] roundVertices;
delete[] splitters;
// Determine the (local and total) bucket size
int* bucketSize = new int[m_numProcs];
memset(bucketSize, 0, sizeof(int) * m_numProcs);
for (unsigned int i = 0; i < numVertices; i++)
bucketSize[bucket[i]]++;
delete[] bucket;
// Tell all processes what we are going to send them
int* recvSize = new int[m_numProcs];
MPI_Alltoall(bucketSize, 1, MPI_INT, recvSize, 1, MPI_INT, m_comm);
unsigned int numSortVertices = 0;
#ifdef _OPENMP
#pragma omp parallel for schedule(static) reduction(+ : numSortVertices)
#endif
for (int i = 0; i < m_numProcs; i++)
numSortVertices += recvSize[i];
// Create sorted send buffer
double* sendVertices = new double[3 * numVertices];
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (unsigned int i = 0; i < numVertices; i++) {
memcpy(&sendVertices[i * 3], &vertices[sortIndices[i] * 3], sizeof(double) * 3);
}
// Allocate buffer for the vertices and exchange them
double* sortVertices = new double[3 * numSortVertices];
int* sDispls = new int[m_numProcs];
int* rDispls = new int[m_numProcs];
sDispls[0] = 0;
rDispls[0] = 0;
for (int i = 1; i < m_numProcs; i++) {
sDispls[i] = sDispls[i - 1] + bucketSize[i - 1];
rDispls[i] = rDispls[i - 1] + recvSize[i - 1];
}
MPI_Alltoallv(sendVertices, bucketSize, sDispls, vertexType, sortVertices, recvSize, rDispls,
vertexType, m_comm);
delete[] sendVertices;
// Chop the last 4 bits to avoid numerical errors
roundVertices = new double[numSortVertices * 3];
removeRoundError(sortVertices, numSortVertices * 3, roundVertices);
// Create indices and sort them (such that the vertices are sorted)
unsigned int* sortSortIndices = new unsigned int[numSortVertices];
createSortedIndices(roundVertices, numSortVertices, sortSortIndices);
delete[] roundVertices;
// Initialize the global ids we send back to the other processors
unsigned long* gids = new unsigned long[numSortVertices];
if (numSortVertices > 0) {
gids[sortSortIndices[0]] = 0;
for (unsigned int i = 1; i < numSortVertices; i++) {
if (equals(&sortVertices[sortSortIndices[i - 1] * 3],
&sortVertices[sortSortIndices[i] * 3]))
gids[sortSortIndices[i]] = gids[sortSortIndices[i - 1]];
else
gids[sortSortIndices[i]] = gids[sortSortIndices[i - 1]] + 1;
}
}
// Create the local vertices list
if (numSortVertices > 0)
m_numLocalVertices = gids[sortSortIndices[numSortVertices - 1]] + 1;
else
m_numLocalVertices = 0;
delete[] m_localVertices;
m_localVertices = new double[m_numLocalVertices * 3];
for (unsigned int i = 0; i < numSortVertices; i++)
memcpy(&m_localVertices[gids[i] * 3], &sortVertices[i * 3], sizeof(double) * 3);
delete[] sortVertices;
// Get the vertices offset
unsigned int offset = m_numLocalVertices;
MPI_Scan(MPI_IN_PLACE, &offset, 1, MPI_UNSIGNED, MPI_SUM, m_comm);
offset -= m_numLocalVertices;
// Add offset to the global ids
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (unsigned int i = 0; i < numSortVertices; i++)
gids[i] += offset;
// Send result back
unsigned long* globalIds = new unsigned long[numVertices];
MPI_Alltoallv(gids, recvSize, rDispls, MPI_UNSIGNED_LONG, globalIds, bucketSize, sDispls,
MPI_UNSIGNED_LONG, m_comm);
delete[] bucketSize;
delete[] recvSize;
delete[] sDispls;
delete[] rDispls;
delete[] gids;
// Assign the global ids to the correct vertices
delete[] m_globalIds;
m_globalIds = new unsigned long[numVertices];
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (unsigned int i = 0; i < numVertices; i++)
m_globalIds[sortIndices[i]] = globalIds[i];
delete[] sortIndices;
delete[] globalIds;
}
/**
* @return The list of the global identifiers after filtering
*/
const unsigned long* globalIds() const { return m_globalIds; }
/**
* @return Number of vertices this process is responsible for after filtering
*/
unsigned int numLocalVertices() const { return m_numLocalVertices; }
/**
* @return The list of vertices this process is responsible for after
* filtering
*/
const double* localVertices() const { return m_localVertices; }
private:
/**
* Removes round errors of double values by setting the last 4 bits
* (of the significand) to zero.
*
* @warning Only works if <code>value</code> ist not nan or infinity
* @todo This should work for arbitrary precision
*/
static double removeRoundError(double value) {
static const uint64_t mask = ~0xF;
union FloatUnion {
double f;
uint64_t bits;
};
FloatUnion result;
result.f = value;
result.bits &= mask;
return result.f;
}
/**
* Removes the round errors using {@link removeRoundError(double)}
*
* @param values The list of floating point values
* @param count Number of values
* @param[out] roundValues The list of rounded values
* (the caller is responsible for allocating the memory)
*/
static void removeRoundError(const double* values, unsigned int count, double* roundValues) {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (unsigned int i = 0; i < count; i++)
roundValues[i] = removeRoundError(values[i]);
}
/**
* Creates the list of sorted indices for the vertices.
* The caller is responsible for allocating the memory.
*/
static void createSortedIndices(const double* vertices, unsigned int numVertices,
unsigned int* sortedIndices) {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (unsigned int i = 0; i < numVertices; i++)
sortedIndices[i] = i;
IndexedVertexComparator comparator(vertices);
std::sort(sortedIndices, sortedIndices + numVertices, comparator);
}
/**
* Compares to vertices for equality
* Assumes that the rounding errors are removed.
*/
static bool equals(const double* vertexA, const double* vertexB) {
return vertexA[0] == vertexB[0] && vertexA[1] == vertexB[1] && vertexA[2] == vertexB[2];
}
/** MPI data type consisting of three doubles */
static MPI_Datatype vertexType;
/** The total buckets we create is <code>BUCKETS_PER_RANK * numProcs</code> */
const static int BUCKETS_PER_RANK = 8;
};
#endif // PARALLEL_VERTEX_FILTER_H
|
rkb_screen.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <complex.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#include "np_helper/np_helper.h"
#include "gto/gto.h"
#define LL 0
#define SS 1
#define SL 2
#define LS 3
int int2e_spinor();
int int2e_spsp1spsp2_spinor();
/*
 * Schwarz / density screening for LLLL integral quartets.
 * Returns nonzero when the shell quartet (ij|kl) must be computed.
 */
int CVHFrkbllll_prescreen(int *shls, CVHFOpt *opt,
                          int *atm, int *bas, double *env)
{
        if (opt == NULL) {
                return 1; /* no optimizer: screening disabled */
        }
        const int nbas = opt->nbas;
        const int ish = shls[0];
        const int jsh = shls[1];
        const int ksh = shls[2];
        const int lsh = shls[3];
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(ish < nbas);
        assert(jsh < nbas);
        assert(ksh < nbas);
        assert(lsh < nbas);
        /* Cauchy-Schwarz style bound on |(ij|kl)| */
        double qijkl = opt->q_cond[ish*nbas+jsh] * opt->q_cond[ksh*nbas+lsh];
        if (qijkl <= opt->direct_scf_cutoff) {
                return 0;
        }
        /* Keep the quartet if any coupled density element is significant */
        double dmin = opt->direct_scf_cutoff / qijkl;
        const double *dmc = opt->dm_cond;
        return dmc[jsh*nbas+ish] > dmin
            || dmc[lsh*nbas+ksh] > dmin
            || dmc[jsh*nbas+ksh] > dmin
            || dmc[jsh*nbas+lsh] > dmin
            || dmc[ish*nbas+ksh] > dmin
            || dmc[ish*nbas+lsh] > dmin;
}
/*
 * Provides the per-DM screening matrices for the LLLL vj/vk driver and the
 * smallest density element worth keeping for this quartet.
 */
int CVHFrkbllll_vkscreen(int *shls, CVHFOpt *opt,
                         double **dms_cond, int n_dm, double *dm_atleast,
                         int *atm, int *bas, double *env)
{
        const int nbas = opt->nbas;
        const double qijkl = opt->q_cond[shls[0]*nbas+shls[1]]
                           * opt->q_cond[shls[2]*nbas+shls[3]];
        /* per-DM shell maxima (dmscond) are stored right after dmcond */
        double *pdmscond = opt->dm_cond + nbas*nbas;
        const int npairs = (n_dm+1)/2;
        int idm;
        for (idm = 0; idm < npairs; idm++) {
                /* J and K for one DM share the same screening matrix
                 * (see _vhf.rdirect_mapdm) */
                double *cond = pdmscond + idm*nbas*nbas;
                dms_cond[idm*2+0] = cond; /* for vj */
                dms_cond[idm*2+1] = cond; /* for vk */
        }
        *dm_atleast = opt->direct_scf_cutoff / qijkl;
        return 1;
}
/*
 * Schwarz / density screening for SSLL integral quartets: the bra pair uses
 * the SS q_cond table, the ket pair the LL table.
 */
int CVHFrkbssll_prescreen(int *shls, CVHFOpt *opt,
                          int *atm, int *bas, double *env)
{
        if (opt == NULL) {
                return 1; /* no optimizer: screening disabled */
        }
        const int nbas = opt->nbas;
        const int ish = shls[0];
        const int jsh = shls[1];
        const int ksh = shls[2];
        const int lsh = shls[3];
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(ish < nbas);
        assert(jsh < nbas);
        assert(ksh < nbas);
        assert(lsh < nbas);
        /* dm_cond layout: one nbas*nbas block per spin sector (LL,SS,SL,...) */
        const double *dmcondll = opt->dm_cond;
        const double *dmcondss = opt->dm_cond + nbas*nbas*SS;
        const double *dmcondsl = opt->dm_cond + nbas*nbas*SL;
        double qijkl = opt->q_cond[nbas*nbas*SS+ish*nbas+jsh]
                     * opt->q_cond[ksh*nbas+lsh];
        if (qijkl <= opt->direct_scf_cutoff) {
                return 0;
        }
        double dmin = opt->direct_scf_cutoff / qijkl;
        return dmcondss[jsh*nbas+ish] > dmin
            || dmcondll[lsh*nbas+ksh] > dmin
            || dmcondsl[jsh*nbas+ksh] > dmin
            || dmcondsl[jsh*nbas+lsh] > dmin
            || dmcondsl[ish*nbas+ksh] > dmin
            || dmcondsl[ish*nbas+lsh] > dmin;
}
// be careful with the order in dms_cond, the current order (dmll, dmss, dmsl)
// is consistent to the function _call_veff_ssll in dhf.py
/*
 * Provides the per-DM screening matrices for the SSLL driver. The order of
 * the slots (dmll, dmss, dmsl) must stay consistent with _call_veff_ssll
 * in dhf.py.
 */
int CVHFrkbssll_vkscreen(int *shls, CVHFOpt *opt,
                         double **dms_cond, int n_dm, double *dm_atleast,
                         int *atm, int *bas, double *env)
{
        const int nbas = opt->nbas;
        const size_t nn = (size_t)nbas * nbas;
        const double qijkl = opt->q_cond[nn*SS + shls[0]*nbas+shls[1]]
                           * opt->q_cond[shls[2]*nbas+shls[3]];
        /* dmscond starts after the four dmcond blocks */
        double *pdmscond = opt->dm_cond + 4*nn;
        const int nset = (n_dm+2) / 3;
        int idm;
        for (idm = 0; idm < nset; idm++) {
                dms_cond[nset*0+idm] = pdmscond + (nset*LL + idm)*nn;
                dms_cond[nset*1+idm] = pdmscond + (nset*SS + idm)*nn;
                dms_cond[nset*2+idm] = pdmscond + (nset*SL + idm)*nn;
        }
        *dm_atleast = opt->direct_scf_cutoff / qijkl;
        return 1;
}
/*
 * Fill qcond (nbas x nbas, symmetric) with Schwarz screening factors:
 * qcond[i,j] = sqrt(max |(ij|ij)|) over all components of the shell pair,
 * where the integrals come from *intor. Only the lower triangle is
 * evaluated; each value is mirrored to the upper triangle.
 */
static void set_qcond(int (*intor)(), CINTOpt *cintopt, double *qcond,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel
{
double qtmp, tmp;
int i, j, ij, di, dj, ish, jsh;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
/* di = largest shell dimension, used to size the integral buffer */
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = ao_loc[ish+1] - ao_loc[ish];
di = MAX(di, dj);
}
double complex *buf = malloc(sizeof(double complex) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
/* decode lower-triangular pair index: ij = ish*(ish+1)/2 + jsh;
 * 1e-7 guards against sqrt() landing just below the integer */
ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
jsh = ij - ish*(ish+1)/2;
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
/* compute the (ij|ij) block */
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
/* tiny floor so screening never divides by zero */
qtmp = 1e-100;
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
/* diagonal element (i,j|i,j) of the di*dj x di*dj block */
tmp = cabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
qtmp = MAX(qtmp, tmp);
} }
/* sqrt so that qcond[i,j]*qcond[k,l] bounds |(ij|kl)| */
qtmp = sqrt(qtmp);
}
qcond[ish*nbas+jsh] = qtmp;
qcond[jsh*nbas+ish] = qtmp;
}
free(buf);
free(cache);
}
}
/* (Re)build the LLLL Schwarz bound table in opt->q_cond. */
void CVHFrkbllll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        if (opt->q_cond != NULL) {
                free(opt->q_cond);
        }
        opt->q_cond = (double *)malloc(sizeof(double) * nbas * nbas);
        /* this screening setup is only valid for the plain Coulomb integral */
        assert(intor == &int2e_spinor);
        set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
}
/* (Re)build the SSSS Schwarz bound table in opt->q_cond. */
void CVHFrkbssss_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        if (opt->q_cond != NULL) {
                free(opt->q_cond);
        }
        opt->q_cond = (double *)malloc(sizeof(double) * nbas * nbas);
        /* this screening setup is only valid for the small-small integral */
        assert(intor == &int2e_spsp1spsp2_spinor);
        set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
}
/*
 * (Re)build both Schwarz bound tables for SSLL screening:
 * the LL table first, the SS table right after it.
 */
void CVHFrkbssll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        if (opt->q_cond != NULL) {
                free(opt->q_cond);
        }
        const size_t nn = (size_t)nbas * nbas;
        opt->q_cond = (double *)malloc(sizeof(double) * nn * 2);
        set_qcond(&int2e_spinor, NULL, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
        set_qcond(&int2e_spsp1spsp2_spinor, NULL, opt->q_cond + nn, ao_loc,
                  atm, natm, bas, nbas, env);
}
/*
 * For every shell pair (ish,jsh), record in dmscond the largest symmetrized
 * density magnitude .5*(|D_ij| + |D_ji|) per density matrix, and in dmcond
 * the maximum over all matrices. Both outputs are symmetric nbas x nbas.
 */
static void set_dmcond(double *dmcond, double *dmscond, double complex *dm,
                       double direct_scf_cutoff, int nset, int *ao_loc,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const size_t nao = ao_loc[nbas];
        int ish, jsh, iset, i, j;
        for (ish = 0; ish < nbas; ish++) {
        for (jsh = 0; jsh <= ish; jsh++) {
                double dmax = 0;
                for (iset = 0; iset < nset; iset++) {
                        double complex *pdm = dm + nao*nao*iset;
                        double dmaxi = 0;
                        for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
                        for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
                                double tmp = .5 * (cabs(pdm[i*nao+j]) + cabs(pdm[j*nao+i]));
                                if (tmp > dmaxi) {
                                        dmaxi = tmp;
                                }
                        } }
                        dmscond[iset*nbas*nbas+ish*nbas+jsh] = dmaxi;
                        dmscond[iset*nbas*nbas+jsh*nbas+ish] = dmaxi;
                        if (dmaxi > dmax) {
                                dmax = dmaxi;
                        }
                }
                dmcond[ish*nbas+jsh] = dmax;
                dmcond[jsh*nbas+ish] = dmax;
        } }
}
// dm_cond ~ 1+nset, dm_cond + dms_cond
/* Build the LLLL density screening tables: dmcond followed by dmscond. */
void CVHFrkbllll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        /* never reuse the buffer: nset may differ between calls */
        if (opt->dm_cond != NULL) {
                free(opt->dm_cond);
        }
        const size_t nn = (size_t)nbas * nbas;
        opt->dm_cond = (double *)malloc(sizeof(double) * nn * (1+nset));
        NPdset0(opt->dm_cond, nn * (1+nset));
        /* layout: dmcond (max over all DMs), then one dmscond block per DM */
        set_dmcond(opt->dm_cond, opt->dm_cond + nn, dm,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
/* Build the SSSS density screening tables: dmcond followed by dmscond. */
void CVHFrkbssss_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        /* never reuse the buffer: nset may differ between calls */
        if (opt->dm_cond != NULL) {
                free(opt->dm_cond);
        }
        const size_t nn = (size_t)nbas * nbas;
        opt->dm_cond = (double *)malloc(sizeof(double) * nn * (1+nset));
        NPdset0(opt->dm_cond, nn * (1+nset));
        set_dmcond(opt->dm_cond, opt->dm_cond + nn, dm,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
// the current order of dmscond (dmll, dmss, dmsl) is consistent to the
// function _call_veff_ssll in dhf.py
/*
 * Build the SSLL density screening tables. The incoming dm holds the spin
 * blocks in the order (dmll, dmss, dmsl), consistent with _call_veff_ssll
 * in dhf.py; dm_cond stores 4 dmcond blocks (LL,SS,SL,LS) followed by the
 * per-DM dmscond blocks in the same order.
 */
void CVHFrkbssll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        if (opt->dm_cond != NULL) {
                free(opt->dm_cond);
        }
        if (nset < 3) {
                fprintf(stderr, "At least 3 sets of DMs (dmll,dmss,dmsl) are "
                        "required to set rkb prescreening\n");
                exit(1);
        }
        nset = nset / 3; /* number of DMs per spin block */
        const size_t nn = (size_t)nbas * nbas;
        opt->dm_cond = (double *)malloc(sizeof(double) * nn*4*(1+nset));
        NPdset0(opt->dm_cond, nn*4*(1+nset));
        int n2c = CINTtot_cgto_spinor(bas, nbas);
        double *pdmscond = opt->dm_cond + nn*4;
        /* one (dmcond, dmscond, dm) triple per spin block; LS stays zeroed */
        const int blocks[3] = {LL, SS, SL};
        int ib;
        for (ib = 0; ib < 3; ib++) {
                const int b = blocks[ib];
                set_dmcond(opt->dm_cond + nn*b,
                           pdmscond + (size_t)nset*nn*b,
                           dm + (size_t)n2c*n2c*b*nset,
                           opt->direct_scf_cutoff, nset, ao_loc,
                           atm, natm, bas, nbas, env);
        }
}
|
utils.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iostream>
#include <vector>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#ifdef _WIN32
#define GLOG_NO_ABBREVIATED_SEVERITIES
#include <windows.h>
#else
#include <dirent.h>
#include <sys/types.h>
#endif
namespace PaddleSolution {
namespace utils {
// Join a directory and a file name with the platform path separator.
inline std::string path_join(const std::string& dir,
                             const std::string& path) {
#ifdef _WIN32
    const std::string seperator = "\\";
#else
    const std::string seperator = "/";
#endif
    return dir + seperator + path;
}
#ifndef _WIN32
// scan a directory and get all files with input extensions
// Scan a directory (non-recursively) and return the paths of all entries
// whose extension (including the dot) occurs in `exts`, e.g. ".jpg|.png".
// Returns an empty list when the directory cannot be opened.
inline std::vector<std::string> get_directory_images(
        const std::string& path, const std::string& exts) {
    std::vector<std::string> imgs;
    DIR *dir = opendir(path.c_str());
    if (dir == NULL) {
        // Fix: the original called closedir(NULL) here, which is
        // undefined behavior; there is nothing to close on failure.
        return imgs;
    }
    struct dirent *entry;
    while ((entry = readdir(dir)) != NULL) {
        // Last '.' marks the extension; "." also covers the "."/".."
        // directory entries returned by readdir.
        auto ext = strrchr(entry->d_name, '.');
        if (!ext || std::string(ext) == "." || std::string(ext) == "..") {
            continue;
        }
        if (exts.find(ext) != std::string::npos) {
            // POSIX branch, so the separator is always '/'
            imgs.push_back(path + "/" + entry->d_name);
        }
    }
    closedir(dir);  // fix: the original leaked the directory handle
    return imgs;
}
#else
// scan a directory and get all files with input extensions
// Scan a directory (non-recursively) and return the paths of all entries
// whose extension (without the dot) occurs in `exts`.
// Returns an empty list when the directory cannot be enumerated.
inline std::vector<std::string> get_directory_images(
        const std::string& path, const std::string& exts) {
    std::string pattern(path);
    pattern.append("\\*");
    std::vector<std::string> imgs;
    WIN32_FIND_DATA data;
    HANDLE hFind;
    if ((hFind = FindFirstFile(pattern.c_str(), &data)) != INVALID_HANDLE_VALUE) {
        do {
            auto fname = std::string(data.cFileName);
            auto pos = fname.rfind(".");
            if (pos == std::string::npos) {
                // Fix: without this guard, substr(npos + 1) == substr(0)
                // returned the whole name, so extension-less files could
                // spuriously match `exts`.
                continue;
            }
            auto ext = fname.substr(pos + 1);
            if (ext.size() > 1 && exts.find(ext) != std::string::npos) {
                imgs.push_back(path + "\\" + data.cFileName);
            }
        } while (FindNextFile(hFind, &data) != 0);
        FindClose(hFind);
    }
    return imgs;
}
#endif
// normalize an interleaved HWC image and repack it to planar CHW
// (channel order is NOT swapped; pass means/stds in the image's own order)
// Normalize an 8-bit interleaved HWC image into a planar CHW float buffer:
// data[(c*H + h)*W + w] = (pixel/255 - fmean[c]) / fstd[c].
// Channel order is kept as-is.
inline void normalize(cv::Mat& im, float* data, std::vector<float>& fmean,
                      std::vector<float>& fstd) {
    const int height = im.rows;
    const int width = im.cols;
    const int channels = im.channels();
    const double scale = static_cast<double>(1.0) / 255.0;
#pragma omp parallel for
    for (int h = 0; h < height; ++h) {
        const uchar* row = im.ptr<uchar>(h);
        for (int w = 0; w < width; ++w) {
            for (int c = 0; c < channels; ++c) {
                float pixel = static_cast<float>(row[w * channels + c]);
                pixel = (pixel * scale - fmean[c]) / fstd[c];
                data[(c * height + h) * width + w] = pixel;
            }
        }
    }
}
// flatten a cv::mat
// Copy an 8-bit interleaved HWC image into a float buffer, preserving the
// HWC memory layout (no normalization, no channel reordering).
inline void flatten_mat(cv::Mat& im, float* data) {
    const int height = im.rows;
    const int width = im.cols;
    const int channels = im.channels();
    const int row_len = width * channels;
#pragma omp parallel for
    for (int h = 0; h < height; ++h) {
        const uchar* row = im.ptr<uchar>(h);
        float* dst = data + h * row_len;
        for (int i = 0; i < row_len; ++i) {
            dst[i] = static_cast<float>(row[i]);
        }
    }
}
// argmax
// Per-pixel argmax over the channel axis of a CHW score tensor.
// out: scores with shape = {C, H, W}. For each pixel i in [0, H*W),
// mask[i] receives the best channel index and scoremap[i] the winning
// score scaled to [0, 255]. Both vectors must be pre-sized to H*W.
inline void argmax(float* out, std::vector<int>& shape,
                   std::vector<uchar>& mask, std::vector<uchar>& scoremap) {
    int out_img_len = shape[1] * shape[2];   // pixels per channel plane
    int blob_out_len = out_img_len * shape[0];
    // Fix: the previous "#pragma omp parallel" (without "for") made every
    // thread execute the whole pixel loop while sharing max_value, racing
    // on it between the unsynchronized reset and the inner reduction, and
    // label only reflected each thread's own chunk of channels. Pixels are
    // independent, so parallelize over them with thread-local state.
#pragma omp parallel for
    for (int i = 0; i < out_img_len; ++i) {
        float max_value = -1;
        int label = 0;
        for (int j = 0; j < shape[0]; ++j) {
            int index = i + j * out_img_len;
            if (index >= blob_out_len) {
                continue;  // defensive bound check kept from the original
            }
            float value = out[index];
            if (value > max_value) {
                max_value = value;
                label = j;
            }
        }
        // Background (label 0) is reported with zero confidence
        if (label == 0) max_value = 0;
        mask[i] = uchar(label);
        scoremap[i] = uchar(max_value * 255);
    }
}
} // namespace utils
} // namespace PaddleSolution
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.